repo_name
stringlengths
8
38
pr_number
int64
3
47.1k
pr_title
stringlengths
8
175
pr_description
stringlengths
2
19.8k
author
null
date_created
stringlengths
25
25
date_merged
stringlengths
25
25
filepath
stringlengths
6
136
before_content
stringlengths
54
884k
after_content
stringlengths
56
884k
pr_author
stringlengths
3
21
previous_commit
stringlengths
40
40
pr_commit
stringlengths
40
40
comment
stringlengths
2
25.4k
comment_author
stringlengths
3
29
__index_level_0__
int64
0
5.1k
moby/moby
42,152
info: unset cgroup-related fields when CgroupDriver == none
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> Fix #42151 **- What I did** unset cgroup-related fields when CgroupDriver == none **- How I did it** Modified `daemon/info_unix.goP **- How to verify it** - Boot the host with cgroup v1 - `dockerd-rootless-setuptool.sh install` - `docker info --format {{ json. }}` ```json { ... "MemoryLimit": false, "SwapLimit": false, "KernelMemory": false, "KernelMemoryTCP": false, "CpuCfsPeriod": false, "CpuCfsQuota": false, "CPUShares": false, "CPUSet": false, "PidsLimit": false, ... "CgroupDriver": "none", ... } ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> info: unset cgroup-related fields when CgroupDriver == none **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-03-16 07:46:33+00:00
2021-11-08 22:45:11+00:00
daemon/info_unix.go
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { 
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } if v.CgroupDriver != cgroupNoneDriver { v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit } v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := 
parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
AkihiroSuda
2d93da12af549c18db87d83fff4dd2770e4c0601
65cc84abc522a564699bb171ca54ea1857256d10
@AkihiroSuda ^^
thaJeztah
4,898
moby/moby
42,152
info: unset cgroup-related fields when CgroupDriver == none
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> Fix #42151 **- What I did** unset cgroup-related fields when CgroupDriver == none **- How I did it** Modified `daemon/info_unix.goP **- How to verify it** - Boot the host with cgroup v1 - `dockerd-rootless-setuptool.sh install` - `docker info --format {{ json. }}` ```json { ... "MemoryLimit": false, "SwapLimit": false, "KernelMemory": false, "KernelMemoryTCP": false, "CpuCfsPeriod": false, "CpuCfsQuota": false, "CPUShares": false, "CPUSet": false, "PidsLimit": false, ... "CgroupDriver": "none", ... } ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> info: unset cgroup-related fields when CgroupDriver == none **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-03-16 07:46:33+00:00
2021-11-08 22:45:11+00:00
daemon/info_unix.go
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { 
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } if v.CgroupDriver != cgroupNoneDriver { v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit } v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := 
parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
AkihiroSuda
2d93da12af549c18db87d83fff4dd2770e4c0601
65cc84abc522a564699bb171ca54ea1857256d10
Because `sysInfo` is not aware of the cgroup driver. We could modify (in a separate PR) `sysInfo` to return true only when we can grant write permission, however, the value needs to be negated to false when `v.CgroupDriver==cgroupNoneDriver`.
AkihiroSuda
4,899
moby/moby
42,152
info: unset cgroup-related fields when CgroupDriver == none
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> Fix #42151 **- What I did** unset cgroup-related fields when CgroupDriver == none **- How I did it** Modified `daemon/info_unix.goP **- How to verify it** - Boot the host with cgroup v1 - `dockerd-rootless-setuptool.sh install` - `docker info --format {{ json. }}` ```json { ... "MemoryLimit": false, "SwapLimit": false, "KernelMemory": false, "KernelMemoryTCP": false, "CpuCfsPeriod": false, "CpuCfsQuota": false, "CPUShares": false, "CPUSet": false, "PidsLimit": false, ... "CgroupDriver": "none", ... } ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> info: unset cgroup-related fields when CgroupDriver == none **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-03-16 07:46:33+00:00
2021-11-08 22:45:11+00:00
daemon/info_unix.go
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := parseInitVersion(string(rv)); err != nil { 
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
//go:build !windows // +build !windows package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "os/exec" "path/filepath" "strings" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/sysinfo" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // fillPlatformInfo fills the platform related info. func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { v.CgroupDriver = daemon.getCgroupDriver() v.CgroupVersion = "1" if sysInfo.CgroupUnified { v.CgroupVersion = "2" } if v.CgroupDriver != cgroupNoneDriver { v.MemoryLimit = sysInfo.MemoryLimit v.SwapLimit = sysInfo.SwapLimit v.KernelMemory = sysInfo.KernelMemory v.KernelMemoryTCP = sysInfo.KernelMemoryTCP v.OomKillDisable = sysInfo.OomKillDisable v.CPUCfsPeriod = sysInfo.CPUCfs v.CPUCfsQuota = sysInfo.CPUCfs v.CPUShares = sysInfo.CPUShares v.CPUSet = sysInfo.Cpuset v.PidsLimit = sysInfo.PidsLimit } v.Runtimes = daemon.configStore.GetAllRuntimes() v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() v.InitBinary = daemon.configStore.GetInitPath() v.RuncCommit.ID = "N/A" v.ContainerdCommit.ID = "N/A" v.InitCommit.ID = "N/A" defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.RuncCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.ContainerdCommit.ID = rv.Revision } else { logrus.Warnf("failed to retrieve containerd version: %v", err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if _, commit, err := 
parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.InitCommit.ID = commit } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } // Set expected and actual commits to the same value to prevent the client // showing that the version does not match the "expected" version/commit. v.RuncCommit.Expected = v.RuncCommit.ID v.ContainerdCommit.Expected = v.ContainerdCommit.ID v.InitCommit.Expected = v.InitCommit.ID if v.CgroupDriver == cgroupNoneDriver { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.") } else { v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.") } } else { if !v.MemoryLimit { v.Warnings = append(v.Warnings, "WARNING: No memory limit support") } if !v.SwapLimit { v.Warnings = append(v.Warnings, "WARNING: No swap limit support") } if !v.KernelMemoryTCP && v.CgroupVersion == "1" { // kernel memory is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support") } if !v.OomKillDisable && v.CgroupVersion == "1" { // oom kill disable is not available for cgroup v2. // Warning is not printed on cgroup v2, because there is no action user can take. 
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support") } if !v.CPUCfsQuota { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support") } if !v.CPUCfsPeriod { v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support") } if !v.CPUShares { v.Warnings = append(v.Warnings, "WARNING: No cpu shares support") } if !v.CPUSet { v.Warnings = append(v.Warnings, "WARNING: No cpuset support") } // TODO add fields for these options in types.Info if !sysInfo.BlkioWeight && v.CgroupVersion == "2" { // blkio weight is not available on cgroup v1 since kernel 5.0. // Warning is not printed on cgroup v1, because there is no action user can take. // On cgroup v2, blkio weight is implemented using io.weight v.Warnings = append(v.Warnings, "WARNING: No io.weight support") } if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support") } if !sysInfo.BlkioReadBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support") } } if !sysInfo.BlkioWriteBpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support") } } if !sysInfo.BlkioReadIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support") } } if !sysInfo.BlkioWriteIOpsDevice { if v.CgroupVersion == "2" { v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support") } else { v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support") } } } if !v.IPv4Forwarding { v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled") } if !v.BridgeNfIptables { v.Warnings 
= append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled") } if !v.BridgeNfIP6tables { v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled") } } func (daemon *Daemon) fillPlatformVersion(v *types.Version) { if rv, err := daemon.containerd.Version(context.Background()); err == nil { v.Components = append(v.Components, types.ComponentVersion{ Name: "containerd", Version: rv.Version, Details: map[string]string{ "GitCommit": rv.Revision, }, }) } defaultRuntime := daemon.configStore.GetDefaultRuntimeName() defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: defaultRuntime, Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) } defaultInitBinary := daemon.configStore.GetInitPath() if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { if ver, commit, err := parseInitVersion(string(rv)); err != nil { logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err) } else { v.Components = append(v.Components, types.ComponentVersion{ Name: filepath.Base(defaultInitBinary), Version: ver, Details: map[string]string{ "GitCommit": commit, }, }) } } else { logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) } } func fillDriverWarnings(v *types.Info) { for _, pair := range v.DriverStatus { if pair[0] == "Data loop file" { msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+ "strongly discouraged for production use.\n "+ "Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver) v.Warnings = append(v.Warnings, msg) continue 
} if pair[0] == "Supports d_type" && pair[1] == "false" { backingFs := getBackingFs(v) msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs) if backingFs == "xfs" { msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n" } msg += " Running without d_type support will not be supported in future releases." v.Warnings = append(v.Warnings, msg) continue } } } func getBackingFs(v *types.Info) string { for _, pair := range v.DriverStatus { if pair[0] == "Backing Filesystem" { return pair[1] } } return "" } // parseInitVersion parses a Tini version string, and extracts the "version" // and "git commit" from the output. // // Output example from `docker-init --version`: // // tini version 0.18.0 - git.fec3683 func parseInitVersion(v string) (version string, commit string, err error) { parts := strings.Split(v, " - ") if len(parts) >= 2 { gitParts := strings.Split(strings.TrimSpace(parts[1]), ".") if len(gitParts) == 2 && gitParts[0] == "git" { commit = gitParts[1] } } parts[0] = strings.TrimSpace(parts[0]) if strings.HasPrefix(parts[0], "tini version ") { version = strings.TrimPrefix(parts[0], "tini version ") } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return version, commit, err } // parseRuntimeVersion parses the output of `[runtime] --version` and extracts the // "name", "version" and "git commit" from the output. 
// // Output example from `runc --version`: // // runc version 1.0.0-rc5+dev // commit: 69663f0bd4b60df09991c08812a60108003fa340 // spec: 1.0.0 func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) { lines := strings.Split(strings.TrimSpace(v), "\n") for _, line := range lines { if strings.Contains(line, "version") { s := strings.Split(line, "version") runtime = strings.TrimSpace(s[0]) version = strings.TrimSpace(s[len(s)-1]) continue } if strings.HasPrefix(line, "commit:") { commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:")) continue } } if version == "" && commit == "" { err = errors.Errorf("unknown output format: %s", v) } return runtime, version, commit, err } func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool { return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate() } // Rootless returns true if daemon is running in rootless mode func (daemon *Daemon) Rootless() bool { return daemon.configStore.Rootless }
AkihiroSuda
2d93da12af549c18db87d83fff4dd2770e4c0601
65cc84abc522a564699bb171ca54ea1857256d10
Could you create a tracking issue for that, in which you describe what needs to be done?
thaJeztah
4,900
moby/moby
42,104
Fix handling for json-file io.UnexpectedEOF
Before this, we essentially end up doubling any buffered data from the json decoder by storing it in both `d.dec` and `d.rdr`. Thanks @tobiasstadler for pointing this error out. Fixes #41820
null
2021-03-01 22:00:29+00:00
2021-03-18 21:18:11+00:00
daemon/logger/jsonfilelog/read.go
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } for retries := 0; retries < maxJSONDecodeRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", 
retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = io.MultiReader(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog maxRetry int } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } if d.maxRetry == 0 { // We aren't using maxJSONDecodeRetry directly so we can give a custom value for testing. 
d.maxRetry = maxJSONDecodeRetry } for retries := 0; retries < d.maxRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = combineReaders(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } func combineReaders(pre, rdr io.Reader) io.Reader { return &combinedReader{pre: pre, rdr: rdr} } // combinedReader is a reader which is like `io.MultiReader` where except it does not cache a full EOF. // Once `io.MultiReader` returns EOF, it is always EOF. // // For this usecase we have an underlying reader which is a file which may reach EOF but have more data written to it later. // As such, io.MultiReader does not work for us. type combinedReader struct { pre io.Reader rdr io.Reader } func (r *combinedReader) Read(p []byte) (int, error) { var read int if r.pre != nil { n, err := r.pre.Read(p) if err != nil { if err != io.EOF { return n, err } r.pre = nil } read = n } if read < len(p) { n, err := r.rdr.Read(p[read:]) if n > 0 { read += n } if err != nil { return read, err } } return read, nil } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
cpuguy83
a84d824c5f23b002669b10cbe7c191508a4b6b21
788f2883d285fc997d25cd27235c5b17cf3c0947
iiuc the issue in here is not the resetting `d.rdr` or not (what looks more correct to me in the old version) but that we use incompatible `Reader` where after it returns `io.EOF` we can read again and it may return more data then. This does not follow the `io.Reader` interface that stdlib defines and therefore the utility functions behavior gets wrong. More specifically `io.MultiReader` caches `io.EOF` https://github.com/golang/go/blob/414fa8c35e7c2f65e2c767d6db2f25791e53b5c1/src/io/multi.go#L41 . So in the previous code if the `MultiReader` has already returned `EOF` once and now is added as a second reader to a new `MultiReader` it never gets called again and new `MultiReader` returns `EOF` without making any new `Read` calls to the original `d.rdr`.
tonistiigi
4,901
moby/moby
42,104
Fix handling for json-file io.UnexpectedEOF
Before this, we essentially end up doubling any buffered data from the json decoder by storing it in both `d.dec` and `d.rdr`. Thanks @tobiasstadler for pointing this error out. Fixes #41820
null
2021-03-01 22:00:29+00:00
2021-03-18 21:18:11+00:00
daemon/logger/jsonfilelog/read.go
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } for retries := 0; retries < maxJSONDecodeRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", 
retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = io.MultiReader(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog maxRetry int } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } if d.maxRetry == 0 { // We aren't using maxJSONDecodeRetry directly so we can give a custom value for testing. 
d.maxRetry = maxJSONDecodeRetry } for retries := 0; retries < d.maxRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = combineReaders(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } func combineReaders(pre, rdr io.Reader) io.Reader { return &combinedReader{pre: pre, rdr: rdr} } // combinedReader is a reader which is like `io.MultiReader` where except it does not cache a full EOF. // Once `io.MultiReader` returns EOF, it is always EOF. // // For this usecase we have an underlying reader which is a file which may reach EOF but have more data written to it later. // As such, io.MultiReader does not work for us. type combinedReader struct { pre io.Reader rdr io.Reader } func (r *combinedReader) Read(p []byte) (int, error) { var read int if r.pre != nil { n, err := r.pre.Read(p) if err != nil { if err != io.EOF { return n, err } r.pre = nil } read = n } if read < len(p) { n, err := r.rdr.Read(p[read:]) if n > 0 { read += n } if err != nil { return read, err } } return read, nil } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
cpuguy83
a84d824c5f23b002669b10cbe7c191508a4b6b21
788f2883d285fc997d25cd27235c5b17cf3c0947
Added a 2nd commit per your suggestion to replace io.MultiReader with a custom implementation that does not cache EOF's so that we can keep the full reader (with the buffered data) in `d.rdr`.
cpuguy83
4,902
moby/moby
42,104
Fix handling for json-file io.UnexpectedEOF
Before this, we essentially end up doubling any buffered data from the json decoder by storing it in both `d.dec` and `d.rdr`. Thanks @tobiasstadler for pointing this error out. Fixes #41820
null
2021-03-01 22:00:29+00:00
2021-03-18 21:18:11+00:00
daemon/logger/jsonfilelog/read.go
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } for retries := 0; retries < maxJSONDecodeRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", 
retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = io.MultiReader(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" import ( "context" "encoding/json" "io" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" "github.com/docker/docker/daemon/logger/loggerutils" "github.com/docker/docker/pkg/tailfile" "github.com/sirupsen/logrus" ) const maxJSONDecodeRetry = 20000 // ReadLogs implements the logger's LogReader interface for the logs // created by this driver. func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() go l.readLogs(logWatcher, config) return logWatcher } func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { defer close(watcher.Msg) l.mu.Lock() l.readers[watcher] = struct{}{} l.mu.Unlock() l.writer.ReadLogs(config, watcher) l.mu.Lock() delete(l.readers, watcher) l.mu.Unlock() } func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { l.Reset() if err := dec.Decode(l); err != nil { return nil, err } var attrs []backend.LogAttr if len(l.Attrs) != 0 { attrs = make([]backend.LogAttr, 0, len(l.Attrs)) for k, v := range l.Attrs { attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) } } msg := &logger.Message{ Source: l.Stream, Timestamp: l.Created, Line: []byte(l.Log), Attrs: attrs, } return msg, nil } type decoder struct { rdr io.Reader dec *json.Decoder jl *jsonlog.JSONLog maxRetry int } func (d *decoder) Reset(rdr io.Reader) { d.rdr = rdr d.dec = nil if d.jl != nil { d.jl.Reset() } } func (d *decoder) Close() { d.dec = nil d.rdr = nil d.jl = nil } func (d *decoder) Decode() (msg *logger.Message, err error) { if d.dec == nil { d.dec = json.NewDecoder(d.rdr) } if d.jl == nil { d.jl = &jsonlog.JSONLog{} } if d.maxRetry == 0 { // We aren't using maxJSONDecodeRetry directly so we can give a custom value for testing. 
d.maxRetry = maxJSONDecodeRetry } for retries := 0; retries < d.maxRetry; retries++ { msg, err = decodeLogLine(d.dec, d.jl) if err == nil || err == io.EOF { break } logrus.WithError(err).WithField("retries", retries).Warn("got error while decoding json") // try again, could be due to a an incomplete json object as we read if _, ok := err.(*json.SyntaxError); ok { d.dec = json.NewDecoder(d.rdr) continue } // io.ErrUnexpectedEOF is returned from json.Decoder when there is // remaining data in the parser's buffer while an io.EOF occurs. // If the json logger writes a partial json log entry to the disk // while at the same time the decoder tries to decode it, the race condition happens. if err == io.ErrUnexpectedEOF { d.rdr = combineReaders(d.dec.Buffered(), d.rdr) d.dec = json.NewDecoder(d.rdr) continue } } return msg, err } func combineReaders(pre, rdr io.Reader) io.Reader { return &combinedReader{pre: pre, rdr: rdr} } // combinedReader is a reader which is like `io.MultiReader` where except it does not cache a full EOF. // Once `io.MultiReader` returns EOF, it is always EOF. // // For this usecase we have an underlying reader which is a file which may reach EOF but have more data written to it later. // As such, io.MultiReader does not work for us. type combinedReader struct { pre io.Reader rdr io.Reader } func (r *combinedReader) Read(p []byte) (int, error) { var read int if r.pre != nil { n, err := r.pre.Read(p) if err != nil { if err != io.EOF { return n, err } r.pre = nil } read = n } if read < len(p) { n, err := r.rdr.Read(p[read:]) if n > 0 { read += n } if err != nil { return read, err } } return read, nil } // decodeFunc is used to create a decoder for the log file reader func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{ rdr: rdr, dec: nil, jl: nil, } } func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io.Reader, int, error) { return tailfile.NewTailReader(ctx, r, req) }
cpuguy83
a84d824c5f23b002669b10cbe7c191508a4b6b21
788f2883d285fc997d25cd27235c5b17cf3c0947
```suggestion func combineReaders(pre, rdr io.Reader) io.Reader { ```
thaJeztah
4,903
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/config/config_windows.go
package config // import "github.com/docker/docker/daemon/config" import ( "github.com/docker/docker/api/types" ) // BridgeConfig stores all the bridge driver specific // configuration. type BridgeConfig struct { commonBridgeConfig } // Config defines the configuration of a docker daemon. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type Config struct { CommonConfig // Fields below here are platform specific. (There are none presently // for the Windows daemon.) } // GetRuntime returns the runtime path and arguments for a given // runtime name func (conf *Config) GetRuntime(name string) *types.Runtime { return nil } // GetDefaultRuntimeName returns the current default runtime func (conf *Config) GetDefaultRuntimeName() string { return StockRuntimeName } // GetAllRuntimes returns a copy of the runtimes map func (conf *Config) GetAllRuntimes() map[string]types.Runtime { return map[string]types.Runtime{} } // GetExecRoot returns the user configured Exec-root func (conf *Config) GetExecRoot() string { return "" } // GetInitPath returns the configured docker-init path func (conf *Config) GetInitPath() string { return "" } // IsSwarmCompatible defines if swarm mode can be enabled in this config func (conf *Config) IsSwarmCompatible() error { return nil } // ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. func (conf *Config) ValidatePlatformConfig() error { return nil } // IsRootless returns conf.Rootless on Linux but false on Windows func (conf *Config) IsRootless() bool { return false }
package config // import "github.com/docker/docker/daemon/config" import ( "github.com/docker/docker/api/types" ) const ( // This is used by the `default-runtime` flag in dockerd as the default value. // On windows we'd prefer to keep this empty so the value is auto-detected based on other options. StockRuntimeName = "" ) // BridgeConfig stores all the bridge driver specific // configuration. type BridgeConfig struct { commonBridgeConfig } // Config defines the configuration of a docker daemon. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type Config struct { CommonConfig // Fields below here are platform specific. (There are none presently // for the Windows daemon.) } // GetRuntime returns the runtime path and arguments for a given // runtime name func (conf *Config) GetRuntime(name string) *types.Runtime { return nil } // GetAllRuntimes returns a copy of the runtimes map func (conf *Config) GetAllRuntimes() map[string]types.Runtime { return map[string]types.Runtime{} } // GetExecRoot returns the user configured Exec-root func (conf *Config) GetExecRoot() string { return "" } // GetInitPath returns the configured docker-init path func (conf *Config) GetInitPath() string { return "" } // IsSwarmCompatible defines if swarm mode can be enabled in this config func (conf *Config) IsSwarmCompatible() error { return nil } // ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. func (conf *Config) ValidatePlatformConfig() error { return nil } // IsRootless returns conf.Rootless on Linux but false on Windows func (conf *Config) IsRootless() bool { return false }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
The whole `StockRuntimeName` vs `DefaultRuntimeName` is a bit confusing. Wondering, will setting this to an empty string cause some odd behavior if somehow the option is set to an empty string? https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/opts/runtime.go#L44-L46
thaJeztah
4,904
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/config/config_windows.go
package config // import "github.com/docker/docker/daemon/config" import ( "github.com/docker/docker/api/types" ) // BridgeConfig stores all the bridge driver specific // configuration. type BridgeConfig struct { commonBridgeConfig } // Config defines the configuration of a docker daemon. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type Config struct { CommonConfig // Fields below here are platform specific. (There are none presently // for the Windows daemon.) } // GetRuntime returns the runtime path and arguments for a given // runtime name func (conf *Config) GetRuntime(name string) *types.Runtime { return nil } // GetDefaultRuntimeName returns the current default runtime func (conf *Config) GetDefaultRuntimeName() string { return StockRuntimeName } // GetAllRuntimes returns a copy of the runtimes map func (conf *Config) GetAllRuntimes() map[string]types.Runtime { return map[string]types.Runtime{} } // GetExecRoot returns the user configured Exec-root func (conf *Config) GetExecRoot() string { return "" } // GetInitPath returns the configured docker-init path func (conf *Config) GetInitPath() string { return "" } // IsSwarmCompatible defines if swarm mode can be enabled in this config func (conf *Config) IsSwarmCompatible() error { return nil } // ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. func (conf *Config) ValidatePlatformConfig() error { return nil } // IsRootless returns conf.Rootless on Linux but false on Windows func (conf *Config) IsRootless() bool { return false }
package config // import "github.com/docker/docker/daemon/config" import ( "github.com/docker/docker/api/types" ) const ( // This is used by the `default-runtime` flag in dockerd as the default value. // On windows we'd prefer to keep this empty so the value is auto-detected based on other options. StockRuntimeName = "" ) // BridgeConfig stores all the bridge driver specific // configuration. type BridgeConfig struct { commonBridgeConfig } // Config defines the configuration of a docker daemon. // It includes json tags to deserialize configuration from a file // using the same names that the flags in the command line uses. type Config struct { CommonConfig // Fields below here are platform specific. (There are none presently // for the Windows daemon.) } // GetRuntime returns the runtime path and arguments for a given // runtime name func (conf *Config) GetRuntime(name string) *types.Runtime { return nil } // GetAllRuntimes returns a copy of the runtimes map func (conf *Config) GetAllRuntimes() map[string]types.Runtime { return map[string]types.Runtime{} } // GetExecRoot returns the user configured Exec-root func (conf *Config) GetExecRoot() string { return "" } // GetInitPath returns the configured docker-init path func (conf *Config) GetInitPath() string { return "" } // IsSwarmCompatible defines if swarm mode can be enabled in this config func (conf *Config) IsSwarmCompatible() error { return nil } // ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. func (conf *Config) ValidatePlatformConfig() error { return nil } // IsRootless returns conf.Rootless on Linux but false on Windows func (conf *Config) IsRootless() bool { return false }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
Ah, never mind; looks like we first check if it's not empty; https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/opts/runtime.go#L39-L41
thaJeztah
4,905
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/daemon_windows.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libcontainerd/local" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 windowsV1RuntimeName = "com.docker.hcsshim.v1" windowsV2RuntimeName = "io.containerd.runhcs.v1" ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error rt := daemon.configStore.GetDefaultRuntimeName() if rt == "" { if daemon.configStore.ContainerdAddr == "" { rt = windowsV1RuntimeName } else { rt = windowsV2RuntimeName } } switch rt { case windowsV1RuntimeName: daemon.containerd, err = local.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) case windowsV2RuntimeName: if daemon.configStore.ContainerdAddr == "" { return fmt.Errorf("cannot use the specified runtime %q without containerd", rt) } daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) default: return fmt.Errorf("unknown windows runtime %s", rt) } return err }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
No cases where we use a default value for the containerd address or anything like that? ISTR some talk about trying to default to V2 on 2022+? (I guess that ship sailed?)
tianon
4,906
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/daemon_windows.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libcontainerd/local" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 windowsV1RuntimeName = "com.docker.hcsshim.v1" windowsV2RuntimeName = "io.containerd.runhcs.v1" ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error rt := daemon.configStore.GetDefaultRuntimeName() if rt == "" { if daemon.configStore.ContainerdAddr == "" { rt = windowsV1RuntimeName } else { rt = windowsV2RuntimeName } } switch rt { case windowsV1RuntimeName: daemon.containerd, err = local.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) case windowsV2RuntimeName: if daemon.configStore.ContainerdAddr == "" { return fmt.Errorf("cannot use the specified runtime %q without containerd", rt) } daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) default: return fmt.Errorf("unknown windows runtime %s", rt) } return err }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
So my plan is to follow-up with a PR to embed containerd and use that if no address is specified on both linux and windows.
cpuguy83
4,907
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/daemon_windows.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libcontainerd/local" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 windowsV1RuntimeName = "com.docker.hcsshim.v1" windowsV2RuntimeName = "io.containerd.runhcs.v1" ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error rt := daemon.configStore.GetDefaultRuntimeName() if rt == "" { if daemon.configStore.ContainerdAddr == "" { rt = windowsV1RuntimeName } else { rt = windowsV2RuntimeName } } switch rt { case windowsV1RuntimeName: daemon.containerd, err = local.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) case windowsV2RuntimeName: if daemon.configStore.ContainerdAddr == "" { return fmt.Errorf("cannot use the specified runtime %q without containerd", rt) } daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) default: return fmt.Errorf("unknown windows runtime %s", rt) } return err }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
That said, even on Linux we don't default a socket here. On Windows we just don't start a containerd process like we do on Linux.
cpuguy83
4,908
moby/moby
42,089
Allow switching Windows runtimes.
This adds support for 2 runtimes on Windows, one that uses the built-in HCSv1 integration and another which uses containerd with the runhcs shim. --- Related to #41455
null
2021-02-26 23:26:59+00:00
2021-09-23 19:08:07+00:00
daemon/daemon_windows.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "math" "path/filepath" "runtime" "strings" "github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim/osversion" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/daemon/config" "github.com/docker/docker/libcontainerd/local" "github.com/docker/docker/libcontainerd/remote" "github.com/docker/docker/libnetwork" nwconfig "github.com/docker/docker/libnetwork/config" "github.com/docker/docker/libnetwork/datastore" winlibnetwork "github.com/docker/docker/libnetwork/drivers/windows" "github.com/docker/docker/libnetwork/netlabel" "github.com/docker/docker/libnetwork/options" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/platform" "github.com/docker/docker/pkg/sysinfo" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc/mgr" ) const ( isWindows = true platformSupported = true windowsMinCPUShares = 1 windowsMaxCPUShares = 10000 windowsMinCPUPercent = 1 windowsMaxCPUPercent = 100 windowsV1RuntimeName = "com.docker.hcsshim.v1" windowsV2RuntimeName = "io.containerd.runhcs.v1" ) // Windows containers are much larger than Linux containers and each of them // have > 20 system processes which why we use much smaller parallelism value. func adjustParallelLimit(n int, limit int) int { return int(math.Max(1, math.Floor(float64(runtime.NumCPU())*.8))) } // Windows has no concept of an execution state directory. So use config.Root here. 
func getPluginExecRoot(root string) string { return filepath.Join(root, "plugins") } func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { return parseSecurityOpt(container, hostConfig) } func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { return nil } func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error { return nil } func checkKernel() error { return nil } func (daemon *Daemon) getCgroupDriver() string { return "" } // adaptContainerSettings is called during container creation to modify any // settings necessary in the HostConfig structure. func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { if hostConfig == nil { return nil } return nil } // verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration func verifyPlatformContainerResources(resources *containertypes.Resources, isHyperv bool) (warnings []string, err error) { fixMemorySwappiness(resources) if !isHyperv { // The processor resource controls are mutually exclusive on // Windows Server Containers, the order of precedence is // CPUCount first, then CPUShares, and CPUPercent last. if resources.CPUCount > 0 { if resources.CPUShares > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") resources.CPUShares = 0 } if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") resources.CPUPercent = 0 } } else if resources.CPUShares > 0 { if resources.CPUPercent > 0 { warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. 
CPU percent discarded") resources.CPUPercent = 0 } } } if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) } if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) } if resources.CPUCount < 0 { return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") } if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") } if resources.NanoCPUs > 0 && resources.CPUShares > 0 { return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") } // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) } if resources.NanoCPUs > 0 && isHyperv && osversion.Build() < osversion.RS3 { leftoverNanoCPUs := resources.NanoCPUs % 1e9 if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. 
NanoCPUs rounded to %d", resources.NanoCPUs) warnings = append(warnings, warningString) } } if len(resources.BlkioDeviceReadBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") } if len(resources.BlkioDeviceReadIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") } if len(resources.BlkioDeviceWriteBps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") } if len(resources.BlkioDeviceWriteIOps) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") } if resources.BlkioWeight > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") } if len(resources.BlkioWeightDevice) > 0 { return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") } if resources.CgroupParent != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") } if resources.CPUPeriod != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") } if resources.CpusetCpus != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") } if resources.CpusetMems != "" { return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") } if resources.KernelMemory != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") } if resources.MemoryReservation != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") } if resources.MemorySwap != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") } if resources.MemorySwappiness != nil { return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") } if resources.OomKillDisable != nil && *resources.OomKillDisable { return warnings, fmt.Errorf("invalid option: Windows does not support 
OomKillDisable") } if resources.PidsLimit != nil && *resources.PidsLimit != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") } if len(resources.Ulimits) != 0 { return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") } return warnings, nil } // verifyPlatformContainerSettings performs platform-specific validation of the // hostconfig and config structures. func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) { if hostConfig == nil { return nil, nil } hyperv := daemon.runAsHyperVContainer(hostConfig) // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // Prior to RS5, we don't allow process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. if !hyperv && system.IsWindowsClient() && osversion.Build() < osversion.RS5 { return warnings, fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } w, err := verifyPlatformContainerResources(&hostConfig.Resources, hyperv) warnings = append(warnings, w...) return warnings, err } // verifyDaemonSettings performs validation of daemon config struct func verifyDaemonSettings(config *config.Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { // Validate the OS version. Note that dockerd.exe must be manifested for this // call to return the correct version. 
if osversion.Get().MajorVersion < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } if osversion.Build() < osversion.RS1 { return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10") } vmcompute := windows.NewLazySystemDLL("vmcompute.dll") if vmcompute.Load() != nil { return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed") } // Ensure that the required Host Network Service and vmcompute services // are running. Docker will fail in unexpected ways if this is not present. var requiredServices = []string{"hns", "vmcompute"} if err := ensureServicesInstalled(requiredServices); err != nil { return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed") } return nil } func ensureServicesInstalled(services []string) error { m, err := mgr.Connect() if err != nil { return err } defer m.Disconnect() for _, service := range services { s, err := m.OpenService(service) if err != nil { return errors.Wrapf(err, "failed to open service %s", service) } s.Close() } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *config.Config, driverName string) error { return nil } // configureMaxThreads sets the Go runtime max threads threshold func configureMaxThreads(config *config.Config) error { return nil } func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { netOptions, err := daemon.networkOptions(config, nil, nil) if err != nil { return nil, err } controller, err := libnetwork.New(netOptions...) 
if err != nil { return nil, fmt.Errorf("error obtaining controller instance: %v", err) } hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") if err != nil { return nil, err } // Remove networks not present in HNS for _, v := range controller.Networks() { options := v.Info().DriverOptions() hnsid := options[winlibnetwork.HNSID] found := false for _, v := range hnsresponse { if v.Id == hnsid { found = true break } } if !found { // global networks should not be deleted by local HNS if v.Info().Scope() != datastore.GlobalScope { err = v.Delete() if err != nil { logrus.Errorf("Error occurred when removing network %v", err) } } } } _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) if err != nil { return nil, err } defaultNetworkExists := false if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { options := network.Info().DriverOptions() for _, v := range hnsresponse { if options[winlibnetwork.HNSID] == v.Id { defaultNetworkExists = true break } } } // discover and add HNS networks to windows // network that exist are removed and added again for _, v := range hnsresponse { networkTypeNorm := strings.ToLower(v.Type) if networkTypeNorm == "private" || networkTypeNorm == "internal" { continue // workaround for HNS reporting unsupported networks } var n libnetwork.Network s := func(current libnetwork.Network) bool { options := current.Info().DriverOptions() if options[winlibnetwork.HNSID] == v.Id { n = current return true } return false } controller.WalkNetworks(s) drvOptions := make(map[string]string) nid := "" if n != nil { nid = n.ID() // global networks should not be deleted by local HNS if n.Info().Scope() == datastore.GlobalScope { continue } v.Name = n.Name() // This will not cause network delete from HNS as the network // is not yet populated in the libnetwork windows driver // restore option if it existed before drvOptions = n.Info().DriverOptions() 
n.Delete() } netOption := map[string]string{ winlibnetwork.NetworkName: v.Name, winlibnetwork.HNSID: v.Id, } // add persisted driver options for k, v := range drvOptions { if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { netOption[k] = v } } v4Conf := []*libnetwork.IpamConf{} for _, subnet := range v.Subnets { ipamV4Conf := libnetwork.IpamConf{} ipamV4Conf.PreferredPool = subnet.AddressPrefix ipamV4Conf.Gateway = subnet.GatewayAddress v4Conf = append(v4Conf, &ipamV4Conf) } name := v.Name // If there is no nat network create one from the first NAT network // encountered if it doesn't already exist if !defaultNetworkExists && runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && n == nil { name = runconfig.DefaultDaemonNetworkMode().NetworkName() defaultNetworkExists = true } v6Conf := []*libnetwork.IpamConf{} _, err := controller.NewNetwork(strings.ToLower(v.Type), name, nid, libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), ) if err != nil { logrus.Errorf("Error occurred when creating network %v", err) } } if !config.DisableBridge { // Initialize default driver "bridge" if err := initBridgeDriver(controller, config); err != nil { return nil, err } } return controller, nil } func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error { if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { return nil } netOption := map[string]string{ winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), } var ipamOption libnetwork.NetworkOption var subnetPrefix string if config.BridgeConfig.FixedCIDR != "" { subnetPrefix = config.BridgeConfig.FixedCIDR } if subnetPrefix != "" { ipamV4Conf := libnetwork.IpamConf{PreferredPool: subnetPrefix} v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} v6Conf := []*libnetwork.IpamConf{} 
ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil) } _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", libnetwork.NetworkOptionGeneric(options.Generic{ netlabel.GenericData: netOption, }), ipamOption, ) if err != nil { return fmt.Errorf("Error creating default network: %v", err) } return nil } // registerLinks sets up links between containers and writes the // configuration out for persistence. As of Windows TP4, links are not supported. func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { return nil } func (daemon *Daemon) cleanupMountsByID(in string) error { return nil } func (daemon *Daemon) cleanupMounts() error { return nil } func recursiveUnmount(_ string) error { return nil } func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) { return &idtools.IdentityMapping{}, nil } func setupDaemonRoot(config *config.Config, rootDir string, rootIdentity idtools.Identity) error { config.Root = rootDir // Create the root directory if it doesn't exists if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil { return err } return nil } // runasHyperVContainer returns true if we are going to run as a Hyper-V container func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool { if hostConfig.Isolation.IsDefault() { // Container is set to use the default, so take the default from the daemon configuration return daemon.defaultIsolation.IsHyperV() } // Container is requesting an isolation mode. Honour it. return hostConfig.Isolation.IsHyperV() } // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. 
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not mount if a Hyper-V container as it needs to be mounted inside the // utility VM, not the host. return nil } return daemon.Mount(container) } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { if daemon.runAsHyperVContainer(container.HostConfig) { // We do not unmount if a Hyper-V container return nil } return daemon.Unmount(container) } func driverOptions(config *config.Config) []nwconfig.Option { return []nwconfig.Option{} } func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { if !c.IsRunning() { return nil, errNotRunning(c.ID) } // Obtain the stats from HCS via libcontainerd stats, err := daemon.containerd.Stats(context.Background(), c.ID) if err != nil { if strings.Contains(err.Error(), "container not found") { return nil, containerNotFound(c.ID) } return nil, err } // Start with an empty structure s := &types.StatsJSON{} s.Stats.Read = stats.Read s.Stats.NumProcs = platform.NumProcs() if stats.HCSStats != nil { hcss := stats.HCSStats // Populate the CPU/processor statistics s.CPUStats = types.CPUStats{ CPUUsage: types.CPUUsage{ TotalUsage: hcss.Processor.TotalRuntime100ns, UsageInKernelmode: hcss.Processor.RuntimeKernel100ns, UsageInUsermode: hcss.Processor.RuntimeUser100ns, }, } // Populate the memory statistics s.MemoryStats = types.MemoryStats{ Commit: hcss.Memory.UsageCommitBytes, CommitPeak: hcss.Memory.UsageCommitPeakBytes, PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes, } // Populate the storage statistics s.StorageStats = types.StorageStats{ ReadCountNormalized: hcss.Storage.ReadCountNormalized, ReadSizeBytes: hcss.Storage.ReadSizeBytes, WriteCountNormalized: hcss.Storage.WriteCountNormalized, 
WriteSizeBytes: hcss.Storage.WriteSizeBytes, } // Populate the network statistics s.Networks = make(map[string]types.NetworkStats) for _, nstats := range hcss.Network { s.Networks[nstats.EndpointId] = types.NetworkStats{ RxBytes: nstats.BytesReceived, RxPackets: nstats.PacketsReceived, RxDropped: nstats.DroppedPacketsIncoming, TxBytes: nstats.BytesSent, TxPackets: nstats.PacketsSent, TxDropped: nstats.DroppedPacketsOutgoing, } } } return s, nil } // setDefaultIsolation determine the default isolation mode for the // daemon to run in. This is only applicable on Windows func (daemon *Daemon) setDefaultIsolation() error { daemon.defaultIsolation = containertypes.Isolation("process") // On client SKUs, default to Hyper-V. @engine maintainers. This // should not be removed. Ping Microsoft folks is there are PRs to // to change this. if system.IsWindowsClient() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } for _, option := range daemon.configStore.ExecOptions { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return err } key = strings.ToLower(key) switch key { case "isolation": if !containertypes.Isolation(val).IsValid() { return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) } if containertypes.Isolation(val).IsHyperV() { daemon.defaultIsolation = containertypes.Isolation("hyperv") } if containertypes.Isolation(val).IsProcess() { if system.IsWindowsClient() && osversion.Build() < osversion.RS5 { // On RS5, we allow (but don't strictly support) process isolation on Client SKUs. // @engine maintainers. This block should not be removed. It partially enforces licensing // restrictions on Windows. Ping Microsoft folks if there are concerns or PRs to change this. 
return fmt.Errorf("Windows client operating systems earlier than version 1809 can only run Hyper-V containers") } daemon.defaultIsolation = containertypes.Isolation("process") } default: return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) } } logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) return nil } func setupDaemonProcess(config *config.Config) error { return nil } func (daemon *Daemon) setupSeccompProfile() error { return nil } func (daemon *Daemon) loadRuntimes() error { return nil } func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { return nil } func setupResolvConf(config *config.Config) { } // RawSysInfo returns *sysinfo.SysInfo . func (daemon *Daemon) RawSysInfo() *sysinfo.SysInfo { return sysinfo.New() } func (daemon *Daemon) initLibcontainerd(ctx context.Context) error { var err error rt := daemon.configStore.GetDefaultRuntimeName() if rt == "" { if daemon.configStore.ContainerdAddr == "" { rt = windowsV1RuntimeName } else { rt = windowsV2RuntimeName } } switch rt { case windowsV1RuntimeName: daemon.containerd, err = local.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) case windowsV2RuntimeName: if daemon.configStore.ContainerdAddr == "" { return fmt.Errorf("cannot use the specified runtime %q without containerd", rt) } daemon.containerd, err = remote.NewClient( ctx, daemon.containerdCli, filepath.Join(daemon.configStore.ExecRoot, "containerd"), daemon.configStore.ContainerdNamespace, daemon, ) default: return fmt.Errorf("unknown windows runtime %s", rt) } return err }
cpuguy83
8dd2a3ca50faf06fc84394d415113fb802e7452c
ed83e2e20e81ed8da26ee0ef84d70faf0ee49d21
Ahhh, got it; thanks for the added detail.
tianon
4,909
moby/moby
42,083
Enable `process_vm_readv` and `process_vm_writev` for kernel >= 4.8
These syscalls were disabled in #18971 due to them requiring CAP_PTRACE. CAP_PTRACE was blocked by default due to a ptrace related exploit. This has been patched in the Linux kernel (version 4.8) and thus `ptrace` has been re-enabled. However, these associated syscalls seem to have been left behind. This commit brings them in line with `ptrace`, and re-enables it for kernel > 4.8. **- What I did** Re-enabled `process_vm_readv` and `process_vm_writev` **- How I did it** Added them to the `ptrace` profile in the seccomp defaults. **- How to verify it** 1. Start a standard Docker container 2. Run a program that uses these functions (as root, or targeting a same-user process) **- Description for the changelog** Re-enabled `process_vm_readv` and `process_vm_writev` by default for kernel >= 4.8
null
2021-02-26 00:55:05+00:00
2021-03-09 11:07:47+00:00
profiles/seccomp/default.json
{ "defaultAction": "SCMP_ACT_ERRNO", "archMap": [ { "architecture": "SCMP_ARCH_X86_64", "subArchitectures": [ "SCMP_ARCH_X86", "SCMP_ARCH_X32" ] }, { "architecture": "SCMP_ARCH_AARCH64", "subArchitectures": [ "SCMP_ARCH_ARM" ] }, { "architecture": "SCMP_ARCH_MIPS64", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64N32" ] }, { "architecture": "SCMP_ARCH_MIPS64N32", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64" ] }, { "architecture": "SCMP_ARCH_MIPSEL64", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64N32" ] }, { "architecture": "SCMP_ARCH_MIPSEL64N32", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64" ] }, { "architecture": "SCMP_ARCH_S390X", "subArchitectures": [ "SCMP_ARCH_S390" ] } ], "syscalls": [ { "names": [ "accept", "accept4", "access", "adjtimex", "alarm", "bind", "brk", "capget", "capset", "chdir", "chmod", "chown", "chown32", "clock_adjtime", "clock_adjtime64", "clock_getres", "clock_getres_time64", "clock_gettime", "clock_gettime64", "clock_nanosleep", "clock_nanosleep_time64", "close", "close_range", "connect", "copy_file_range", "creat", "dup", "dup2", "dup3", "epoll_create", "epoll_create1", "epoll_ctl", "epoll_ctl_old", "epoll_pwait", "epoll_pwait2", "epoll_wait", "epoll_wait_old", "eventfd", "eventfd2", "execve", "execveat", "exit", "exit_group", "faccessat", "faccessat2", "fadvise64", "fadvise64_64", "fallocate", "fanotify_mark", "fchdir", "fchmod", "fchmodat", "fchown", "fchown32", "fchownat", "fcntl", "fcntl64", "fdatasync", "fgetxattr", "flistxattr", "flock", "fork", "fremovexattr", "fsetxattr", "fstat", "fstat64", "fstatat64", "fstatfs", "fstatfs64", "fsync", "ftruncate", "ftruncate64", "futex", "futex_time64", "futimesat", "getcpu", "getcwd", "getdents", "getdents64", "getegid", "getegid32", "geteuid", "geteuid32", "getgid", "getgid32", "getgroups", "getgroups32", "getitimer", "getpeername", "getpgid", "getpgrp", "getpid", "getppid", "getpriority", "getrandom", "getresgid", "getresgid32", 
"getresuid", "getresuid32", "getrlimit", "get_robust_list", "getrusage", "getsid", "getsockname", "getsockopt", "get_thread_area", "gettid", "gettimeofday", "getuid", "getuid32", "getxattr", "inotify_add_watch", "inotify_init", "inotify_init1", "inotify_rm_watch", "io_cancel", "ioctl", "io_destroy", "io_getevents", "io_pgetevents", "io_pgetevents_time64", "ioprio_get", "ioprio_set", "io_setup", "io_submit", "io_uring_enter", "io_uring_register", "io_uring_setup", "ipc", "kill", "lchown", "lchown32", "lgetxattr", "link", "linkat", "listen", "listxattr", "llistxattr", "_llseek", "lremovexattr", "lseek", "lsetxattr", "lstat", "lstat64", "madvise", "membarrier", "memfd_create", "mincore", "mkdir", "mkdirat", "mknod", "mknodat", "mlock", "mlock2", "mlockall", "mmap", "mmap2", "mprotect", "mq_getsetattr", "mq_notify", "mq_open", "mq_timedreceive", "mq_timedreceive_time64", "mq_timedsend", "mq_timedsend_time64", "mq_unlink", "mremap", "msgctl", "msgget", "msgrcv", "msgsnd", "msync", "munlock", "munlockall", "munmap", "nanosleep", "newfstatat", "_newselect", "open", "openat", "openat2", "pause", "pidfd_open", "pidfd_send_signal", "pipe", "pipe2", "poll", "ppoll", "ppoll_time64", "prctl", "pread64", "preadv", "preadv2", "prlimit64", "pselect6", "pselect6_time64", "pwrite64", "pwritev", "pwritev2", "read", "readahead", "readlink", "readlinkat", "readv", "recv", "recvfrom", "recvmmsg", "recvmmsg_time64", "recvmsg", "remap_file_pages", "removexattr", "rename", "renameat", "renameat2", "restart_syscall", "rmdir", "rseq", "rt_sigaction", "rt_sigpending", "rt_sigprocmask", "rt_sigqueueinfo", "rt_sigreturn", "rt_sigsuspend", "rt_sigtimedwait", "rt_sigtimedwait_time64", "rt_tgsigqueueinfo", "sched_getaffinity", "sched_getattr", "sched_getparam", "sched_get_priority_max", "sched_get_priority_min", "sched_getscheduler", "sched_rr_get_interval", "sched_rr_get_interval_time64", "sched_setaffinity", "sched_setattr", "sched_setparam", "sched_setscheduler", "sched_yield", "seccomp", 
"select", "semctl", "semget", "semop", "semtimedop", "semtimedop_time64", "send", "sendfile", "sendfile64", "sendmmsg", "sendmsg", "sendto", "setfsgid", "setfsgid32", "setfsuid", "setfsuid32", "setgid", "setgid32", "setgroups", "setgroups32", "setitimer", "setpgid", "setpriority", "setregid", "setregid32", "setresgid", "setresgid32", "setresuid", "setresuid32", "setreuid", "setreuid32", "setrlimit", "set_robust_list", "setsid", "setsockopt", "set_thread_area", "set_tid_address", "setuid", "setuid32", "setxattr", "shmat", "shmctl", "shmdt", "shmget", "shutdown", "sigaltstack", "signalfd", "signalfd4", "sigprocmask", "sigreturn", "socket", "socketcall", "socketpair", "splice", "stat", "stat64", "statfs", "statfs64", "statx", "symlink", "symlinkat", "sync", "sync_file_range", "syncfs", "sysinfo", "tee", "tgkill", "time", "timer_create", "timer_delete", "timer_getoverrun", "timer_gettime", "timer_gettime64", "timer_settime", "timer_settime64", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", "timerfd_settime", "timerfd_settime64", "times", "tkill", "truncate", "truncate64", "ugetrlimit", "umask", "uname", "unlink", "unlinkat", "utime", "utimensat", "utimensat_time64", "utimes", "vfork", "vmsplice", "wait4", "waitid", "waitpid", "write", "writev" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "ptrace" ], "action": "SCMP_ACT_ALLOW", "args": null, "comment": "", "includes": { "minKernel": "4.8" }, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 0, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 8, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131072, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, 
"excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131080, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 4294967295, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "sync_file_range2" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "ppc64le" ] }, "excludes": {} }, { "names": [ "arm_fadvise64_64", "arm_sync_file_range", "sync_file_range2", "breakpoint", "cacheflush", "set_tls" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "arm", "arm64" ] }, "excludes": {} }, { "names": [ "arch_prctl" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "amd64", "x32" ] }, "excludes": {} }, { "names": [ "modify_ldt" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "amd64", "x32", "x86" ] }, "excludes": {} }, { "names": [ "s390_pci_mmio_read", "s390_pci_mmio_write", "s390_runtime_instr" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "s390", "s390x" ] }, "excludes": {} }, { "names": [ "open_by_handle_at" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_DAC_READ_SEARCH" ] }, "excludes": {} }, { "names": [ "bpf", "clone", "fanotify_init", "fsconfig", "fsmount", "fsopen", "fspick", "lookup_dcookie", "mount", "move_mount", "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", "setdomainname", "sethostname", "setns", "syslog", "umount", "umount2", "unshare" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_ADMIN" ] }, "excludes": {} }, { "names": [ "clone" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 2114060288, "op": "SCMP_CMP_MASKED_EQ" } ], "comment": "", "includes": {}, "excludes": { "caps": [ 
"CAP_SYS_ADMIN" ], "arches": [ "s390", "s390x" ] } }, { "names": [ "clone" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 1, "value": 2114060288, "op": "SCMP_CMP_MASKED_EQ" } ], "comment": "s390 parameter ordering for clone is different", "includes": { "arches": [ "s390", "s390x" ] }, "excludes": { "caps": [ "CAP_SYS_ADMIN" ] } }, { "names": [ "reboot" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_BOOT" ] }, "excludes": {} }, { "names": [ "chroot" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_CHROOT" ] }, "excludes": {} }, { "names": [ "delete_module", "init_module", "finit_module" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_MODULE" ] }, "excludes": {} }, { "names": [ "acct" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_PACCT" ] }, "excludes": {} }, { "names": [ "kcmp", "pidfd_getfd", "process_madvise", "process_vm_readv", "process_vm_writev", "ptrace" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_PTRACE" ] }, "excludes": {} }, { "names": [ "iopl", "ioperm" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_RAWIO" ] }, "excludes": {} }, { "names": [ "settimeofday", "stime", "clock_settime" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_TIME" ] }, "excludes": {} }, { "names": [ "vhangup" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_TTY_CONFIG" ] }, "excludes": {} }, { "names": [ "get_mempolicy", "mbind", "set_mempolicy" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_NICE" ] }, "excludes": {} }, { "names": [ "syslog" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYSLOG" ] }, "excludes": {} } ] }
{ "defaultAction": "SCMP_ACT_ERRNO", "archMap": [ { "architecture": "SCMP_ARCH_X86_64", "subArchitectures": [ "SCMP_ARCH_X86", "SCMP_ARCH_X32" ] }, { "architecture": "SCMP_ARCH_AARCH64", "subArchitectures": [ "SCMP_ARCH_ARM" ] }, { "architecture": "SCMP_ARCH_MIPS64", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64N32" ] }, { "architecture": "SCMP_ARCH_MIPS64N32", "subArchitectures": [ "SCMP_ARCH_MIPS", "SCMP_ARCH_MIPS64" ] }, { "architecture": "SCMP_ARCH_MIPSEL64", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64N32" ] }, { "architecture": "SCMP_ARCH_MIPSEL64N32", "subArchitectures": [ "SCMP_ARCH_MIPSEL", "SCMP_ARCH_MIPSEL64" ] }, { "architecture": "SCMP_ARCH_S390X", "subArchitectures": [ "SCMP_ARCH_S390" ] } ], "syscalls": [ { "names": [ "accept", "accept4", "access", "adjtimex", "alarm", "bind", "brk", "capget", "capset", "chdir", "chmod", "chown", "chown32", "clock_adjtime", "clock_adjtime64", "clock_getres", "clock_getres_time64", "clock_gettime", "clock_gettime64", "clock_nanosleep", "clock_nanosleep_time64", "close", "close_range", "connect", "copy_file_range", "creat", "dup", "dup2", "dup3", "epoll_create", "epoll_create1", "epoll_ctl", "epoll_ctl_old", "epoll_pwait", "epoll_pwait2", "epoll_wait", "epoll_wait_old", "eventfd", "eventfd2", "execve", "execveat", "exit", "exit_group", "faccessat", "faccessat2", "fadvise64", "fadvise64_64", "fallocate", "fanotify_mark", "fchdir", "fchmod", "fchmodat", "fchown", "fchown32", "fchownat", "fcntl", "fcntl64", "fdatasync", "fgetxattr", "flistxattr", "flock", "fork", "fremovexattr", "fsetxattr", "fstat", "fstat64", "fstatat64", "fstatfs", "fstatfs64", "fsync", "ftruncate", "ftruncate64", "futex", "futex_time64", "futimesat", "getcpu", "getcwd", "getdents", "getdents64", "getegid", "getegid32", "geteuid", "geteuid32", "getgid", "getgid32", "getgroups", "getgroups32", "getitimer", "getpeername", "getpgid", "getpgrp", "getpid", "getppid", "getpriority", "getrandom", "getresgid", "getresgid32", 
"getresuid", "getresuid32", "getrlimit", "get_robust_list", "getrusage", "getsid", "getsockname", "getsockopt", "get_thread_area", "gettid", "gettimeofday", "getuid", "getuid32", "getxattr", "inotify_add_watch", "inotify_init", "inotify_init1", "inotify_rm_watch", "io_cancel", "ioctl", "io_destroy", "io_getevents", "io_pgetevents", "io_pgetevents_time64", "ioprio_get", "ioprio_set", "io_setup", "io_submit", "io_uring_enter", "io_uring_register", "io_uring_setup", "ipc", "kill", "lchown", "lchown32", "lgetxattr", "link", "linkat", "listen", "listxattr", "llistxattr", "_llseek", "lremovexattr", "lseek", "lsetxattr", "lstat", "lstat64", "madvise", "membarrier", "memfd_create", "mincore", "mkdir", "mkdirat", "mknod", "mknodat", "mlock", "mlock2", "mlockall", "mmap", "mmap2", "mprotect", "mq_getsetattr", "mq_notify", "mq_open", "mq_timedreceive", "mq_timedreceive_time64", "mq_timedsend", "mq_timedsend_time64", "mq_unlink", "mremap", "msgctl", "msgget", "msgrcv", "msgsnd", "msync", "munlock", "munlockall", "munmap", "nanosleep", "newfstatat", "_newselect", "open", "openat", "openat2", "pause", "pidfd_open", "pidfd_send_signal", "pipe", "pipe2", "poll", "ppoll", "ppoll_time64", "prctl", "pread64", "preadv", "preadv2", "prlimit64", "pselect6", "pselect6_time64", "pwrite64", "pwritev", "pwritev2", "read", "readahead", "readlink", "readlinkat", "readv", "recv", "recvfrom", "recvmmsg", "recvmmsg_time64", "recvmsg", "remap_file_pages", "removexattr", "rename", "renameat", "renameat2", "restart_syscall", "rmdir", "rseq", "rt_sigaction", "rt_sigpending", "rt_sigprocmask", "rt_sigqueueinfo", "rt_sigreturn", "rt_sigsuspend", "rt_sigtimedwait", "rt_sigtimedwait_time64", "rt_tgsigqueueinfo", "sched_getaffinity", "sched_getattr", "sched_getparam", "sched_get_priority_max", "sched_get_priority_min", "sched_getscheduler", "sched_rr_get_interval", "sched_rr_get_interval_time64", "sched_setaffinity", "sched_setattr", "sched_setparam", "sched_setscheduler", "sched_yield", "seccomp", 
"select", "semctl", "semget", "semop", "semtimedop", "semtimedop_time64", "send", "sendfile", "sendfile64", "sendmmsg", "sendmsg", "sendto", "setfsgid", "setfsgid32", "setfsuid", "setfsuid32", "setgid", "setgid32", "setgroups", "setgroups32", "setitimer", "setpgid", "setpriority", "setregid", "setregid32", "setresgid", "setresgid32", "setresuid", "setresuid32", "setreuid", "setreuid32", "setrlimit", "set_robust_list", "setsid", "setsockopt", "set_thread_area", "set_tid_address", "setuid", "setuid32", "setxattr", "shmat", "shmctl", "shmdt", "shmget", "shutdown", "sigaltstack", "signalfd", "signalfd4", "sigprocmask", "sigreturn", "socket", "socketcall", "socketpair", "splice", "stat", "stat64", "statfs", "statfs64", "statx", "symlink", "symlinkat", "sync", "sync_file_range", "syncfs", "sysinfo", "tee", "tgkill", "time", "timer_create", "timer_delete", "timer_getoverrun", "timer_gettime", "timer_gettime64", "timer_settime", "timer_settime64", "timerfd_create", "timerfd_gettime", "timerfd_gettime64", "timerfd_settime", "timerfd_settime64", "times", "tkill", "truncate", "truncate64", "ugetrlimit", "umask", "uname", "unlink", "unlinkat", "utime", "utimensat", "utimensat_time64", "utimes", "vfork", "vmsplice", "wait4", "waitid", "waitpid", "write", "writev" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "process_vm_readv", "process_vm_writev", "ptrace" ], "action": "SCMP_ACT_ALLOW", "args": null, "comment": "", "includes": { "minKernel": "4.8" }, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 0, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 8, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131072, "op": "SCMP_CMP_EQ" } ], 
"comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 131080, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "personality" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 4294967295, "op": "SCMP_CMP_EQ" } ], "comment": "", "includes": {}, "excludes": {} }, { "names": [ "sync_file_range2" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "ppc64le" ] }, "excludes": {} }, { "names": [ "arm_fadvise64_64", "arm_sync_file_range", "sync_file_range2", "breakpoint", "cacheflush", "set_tls" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "arm", "arm64" ] }, "excludes": {} }, { "names": [ "arch_prctl" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "amd64", "x32" ] }, "excludes": {} }, { "names": [ "modify_ldt" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "amd64", "x32", "x86" ] }, "excludes": {} }, { "names": [ "s390_pci_mmio_read", "s390_pci_mmio_write", "s390_runtime_instr" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "arches": [ "s390", "s390x" ] }, "excludes": {} }, { "names": [ "open_by_handle_at" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_DAC_READ_SEARCH" ] }, "excludes": {} }, { "names": [ "bpf", "clone", "fanotify_init", "fsconfig", "fsmount", "fsopen", "fspick", "lookup_dcookie", "mount", "move_mount", "name_to_handle_at", "open_tree", "perf_event_open", "quotactl", "setdomainname", "sethostname", "setns", "syslog", "umount", "umount2", "unshare" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_ADMIN" ] }, "excludes": {} }, { "names": [ "clone" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 0, "value": 2114060288, "op": "SCMP_CMP_MASKED_EQ" } ], "comment": "", 
"includes": {}, "excludes": { "caps": [ "CAP_SYS_ADMIN" ], "arches": [ "s390", "s390x" ] } }, { "names": [ "clone" ], "action": "SCMP_ACT_ALLOW", "args": [ { "index": 1, "value": 2114060288, "op": "SCMP_CMP_MASKED_EQ" } ], "comment": "s390 parameter ordering for clone is different", "includes": { "arches": [ "s390", "s390x" ] }, "excludes": { "caps": [ "CAP_SYS_ADMIN" ] } }, { "names": [ "reboot" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_BOOT" ] }, "excludes": {} }, { "names": [ "chroot" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_CHROOT" ] }, "excludes": {} }, { "names": [ "delete_module", "init_module", "finit_module" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_MODULE" ] }, "excludes": {} }, { "names": [ "acct" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_PACCT" ] }, "excludes": {} }, { "names": [ "kcmp", "pidfd_getfd", "process_madvise", "process_vm_readv", "process_vm_writev", "ptrace" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_PTRACE" ] }, "excludes": {} }, { "names": [ "iopl", "ioperm" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_RAWIO" ] }, "excludes": {} }, { "names": [ "settimeofday", "stime", "clock_settime" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_TIME" ] }, "excludes": {} }, { "names": [ "vhangup" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_TTY_CONFIG" ] }, "excludes": {} }, { "names": [ "get_mempolicy", "mbind", "set_mempolicy" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYS_NICE" ] }, "excludes": {} }, { "names": [ "syslog" ], "action": "SCMP_ACT_ALLOW", "args": [], "comment": "", "includes": { "caps": [ "CAP_SYSLOG" ] }, "excludes": {} 
} ] }
clubby789
dbc3365da22ce76367ccaad79248c9a98332d80d
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
I suspect this is what CI is failing on (while files _should_ have a newline at the end, the _generated_ version possibly doesn't have one); ``` [2021-02-26T02:03:34.884Z] M profiles/seccomp/default.json [2021-02-26T02:03:34.884Z] [2021-02-26T02:03:34.884Z] Please re-run go generate ./profiles/seccomp/ ```
thaJeztah
4,910
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
dirPath can be like "/sys/fs/cgroup", but we need to check the existence of `/sys/fs/cgroup/${dockerd_cgroup}/memory.swap.max`, not `/sys/fs/cgroup/memory.swap.max`, because `/sys/fs/cgroup/memory.swap.max` never appears.
AkihiroSuda
4,911
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
You are right, I missed that. I changed the logic a little bit but inside the function.
jmguzik
4,912
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
Actually, if `/sys/fs/cgroup/memory.swap.max` never appears, this check right now could be that `path.Join(dirPath, "memory.swap.max")` exists, without checking that we are in the group. Let me know what you think
jmguzik
4,913
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
This does not work when dirPath == /sys/fs/cgroup
AkihiroSuda
4,914
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
So, right now if dirPath == /sys/fs/cgroup then it is assumed upper function did not deliver /sys/fs/cgroup/${dockerd_cgroup}/ and info.SwapLimit defaults to false. Is that correct?
jmguzik
4,915
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
Is there a possibility that `/sys/fs/cgroup/memory.swap.max` will appear? If yes, is it then relevant to check for this use-case?
jmguzik
4,916
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
> Is there a possibility that /sys/fs/cgroup/memory.swap.max will appear? Yes `/sys/fs/cgroup/memory.swap.max` can appear when the daemon is running inside a cgroup namespace (i.e., dind), but we never need to check `/sys/fs/cgroup/memory.swap.max`
AkihiroSuda
4,917
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
The caller never pass "/sys/fs/cgroup/${dockerd_cgroup}". (See `git grep WithCgroup2GroupPath`). So we need to detect the cgroup of the daemon by ourselves, regardless to the value of `dirPath`.
AkihiroSuda
4,918
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
> The caller never pass "/sys/fs/cgroup/${dockerd_cgroup}". (See git grep WithCgroup2GroupPath). Sorry, I thought `g` in the caller is the added part that will complement the path with the group. Sysinfo can be used in a package that does not belong to dockerd, so /proc/self/cgroup is not the option here, am I right?
jmguzik
4,919
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
I think we can safely use /proc/self/cgroup here
AkihiroSuda
4,920
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
@AkihiroSuda I hope this one is better now. I have tested the logic over several distros and it seems to be working. Let me know what you think.
jmguzik
4,921
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
- How can g be "."? - When g is "/", we are in a cgroup namespace, so we should check /sys/fs/cgroup/memory.swap.max
AkihiroSuda
4,922
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
nit: function name should be like getSwapLimitV2()
AkihiroSuda
4,923
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
- If g is empty, clean colapses it to ".". I understand empty would be some kind of a bug, so collapsing to "." should be treated as a bug - Regarding second comment, wasn't sure. I will remove that one.
jmguzik
4,924
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
To make things more visible I could first check if g == "" and then do a Clean. I think "" is the only case when Clean collapses to "."
jmguzik
4,925
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
Given that this is linux-only, this should probably use `path.Join()` instead of `filepath.Join()` (also prevents the extra import being added
thaJeztah
4,926
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
was the extra `path.Clean()` needed here? from the docs; https://golang.org/pkg/path/#Join > Join joins any number of path elements into a single path, separating them with slashes. Empty elements are ignored. **The result is Cleaned.**
thaJeztah
4,927
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
@AkihiroSuda I'm a bit confused why we need to use libcontainer cgroups here; does containerd cgroups not provide this info? We're already passing that one here; https://github.com/moby/moby/blob/46cdcd206c56172b95ba5c77b827a722dab426c5/pkg/sysinfo/cgroup2_linux.go#L24 (if it doesn't provide this info, should we fix that there?
thaJeztah
4,928
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
Maybe it is not needed. I can remove this one.
jmguzik
4,929
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
ok
jmguzik
4,930
moby/moby
42,071
Fix setting swaplimit=true without checking memory.swap.max
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** fixes #41926 **- How I did it** Added one condition to moby/pkg/sysinfo/cgroup2_linux.go **- How to verify it** Actually, I wanted to create unit tests for this one. The problem is certain files must be present on FS, so I see 2 solutions: 1) use mocks (of os.Stat for example) 2) since part of the path is passed as an argument, choose a different dir for tests (eg /tmp?) I do not know what is the common practice in the project (1st PR), so I decided just to simply open a PR and ask. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fixes setting swaplimit=true without checking memory.swap.max **- A picture of a cute animal (not mandatory but encouraged)** Another time :)
null
2021-02-24 23:26:44+00:00
2021-06-05 15:01:35+00:00
pkg/sysinfo/cgroup2_linux.go
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = true info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := 
controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
package sysinfo // import "github.com/docker/docker/pkg/sysinfo" import ( "io/ioutil" "os" "path" "strings" cgroupsV2 "github.com/containerd/cgroups/v2" "github.com/containerd/containerd/sys" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/sirupsen/logrus" ) type infoCollectorV2 func(info *SysInfo, controllers map[string]struct{}, dirPath string) (warnings []string) func newV2(quiet bool, opts *opts) *SysInfo { var warnings []string sysInfo := &SysInfo{ CgroupUnified: true, } g := opts.cg2GroupPath if g == "" { g = "/" } m, err := cgroupsV2.LoadManager("/sys/fs/cgroup", g) if err != nil { logrus.Warn(err) } else { controllersM := make(map[string]struct{}) controllers, err := m.Controllers() if err != nil { logrus.Warn(err) } for _, c := range controllers { controllersM[c] = struct{}{} } opsV2 := []infoCollectorV2{ applyMemoryCgroupInfoV2, applyCPUCgroupInfoV2, applyIOCgroupInfoV2, applyCPUSetCgroupInfoV2, applyPIDSCgroupInfoV2, applyDevicesCgroupInfoV2, } dirPath := path.Join("/sys/fs/cgroup", path.Clean(g)) for _, o := range opsV2 { w := o(sysInfo, controllersM, dirPath) warnings = append(warnings, w...) } } ops := []infoCollector{ applyNetworkingInfo, applyAppArmorInfo, applySeccompInfo, applyCgroupNsInfo, } for _, o := range ops { w := o(sysInfo, nil) warnings = append(warnings, w...) 
} if !quiet { for _, w := range warnings { logrus.Warn(w) } } return sysInfo } func getSwapLimitV2() bool { groups, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return false } g := groups[""] if g == "" { return false } cGroupPath := path.Join("/sys/fs/cgroup", g, "memory.swap.max") if _, err = os.Stat(cGroupPath); os.IsNotExist(err) { return false } return true } func applyMemoryCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["memory"]; !ok { warnings = append(warnings, "Unable to find memory controller") return warnings } info.MemoryLimit = true info.SwapLimit = getSwapLimitV2() info.MemoryReservation = true info.OomKillDisable = false info.MemorySwappiness = false info.KernelMemory = false info.KernelMemoryTCP = false return warnings } func applyCPUCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["cpu"]; !ok { warnings = append(warnings, "Unable to find cpu controller") return warnings } info.CPUShares = true info.CPUCfs = true info.CPURealtime = false return warnings } func applyIOCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["io"]; !ok { warnings = append(warnings, "Unable to find io controller") return warnings } info.BlkioWeight = true info.BlkioWeightDevice = true info.BlkioReadBpsDevice = true info.BlkioWriteBpsDevice = true info.BlkioReadIOpsDevice = true info.BlkioWriteIOpsDevice = true return warnings } func applyCPUSetCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, dirPath string) []string { var warnings []string if _, ok := controllers["cpuset"]; !ok { warnings = append(warnings, "Unable to find cpuset controller") return warnings } info.Cpuset = true cpus, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.cpus.effective")) if err != nil { return warnings } info.Cpus = 
strings.TrimSpace(string(cpus)) mems, err := ioutil.ReadFile(path.Join(dirPath, "cpuset.mems.effective")) if err != nil { return warnings } info.Mems = strings.TrimSpace(string(mems)) return warnings } func applyPIDSCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { var warnings []string if _, ok := controllers["pids"]; !ok { warnings = append(warnings, "Unable to find pids controller") return warnings } info.PidsLimit = true return warnings } func applyDevicesCgroupInfoV2(info *SysInfo, controllers map[string]struct{}, _ string) []string { info.CgroupDevicesEnabled = !sys.RunningInUserNS() return nil }
jmguzik
58f0a1597d1c323c105d28156e6f74fa15e7ee70
8dbd90ec00daa26dc45d7da2431c965dec99e8b4
> The caller never pass "/sys/fs/cgroup/${dockerd_cgroup}". (See `git grep WithCgroup2GroupPath`). > > So we need to detect the cgroup of the daemon by ourselves, regardless to the value of `dirPath`. Quote from resolved conversations
jmguzik
4,931
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
RFC: do we want to import this to reduce code copypasta, or do we want to just copy the code without introducing dependency on gh/c/c/snapshots ?
AkihiroSuda
4,932
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then
// the error graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	opts, err := parseOptions(options)
	if err != nil {
		return nil, err
	}

	// Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory.
	// This covers situations where /var/lib/docker/overlay2 is a mount, and on a different
	// filesystem than /var/lib/docker.
	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
	testdir := home
	if _, err := os.Stat(testdir); os.IsNotExist(err) {
		testdir = filepath.Dir(testdir)
	}

	if err := overlayutils.SupportsOverlay(testdir, true); err != nil {
		logger.Error(err)
		return nil, graphdriver.ErrNotSupported
	}

	fsMagic, err := graphdriver.GetFSMagic(testdir)
	if err != nil {
		return nil, err
	}
	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
		backingFs = fsName
	}

	supportsDType, err := fsutils.SupportsDType(testdir)
	if err != nil {
		return nil, err
	}
	if !supportsDType {
		if !graphdriver.IsInitialized(home) {
			return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs)
		}
		// allow running without d_type only for existing setups (#27443)
		logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
	}

	if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil {
		return nil, err
	}

	d := &Driver{
		home:          home,
		uidMaps:       uidMaps,
		gidMaps:       gidMaps,
		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
		supportsDType: supportsDType,
		locker:        locker.New(),
		options:       *opts,
	}

	d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)

	if backingFs == "xfs" {
		// Try to enable project quota support over xfs.
		if d.quotaCtl, err = quota.NewControl(home); err == nil {
			projectQuotaSupported = true
		} else if opts.quota.Size > 0 {
			return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err)
		}
	} else if opts.quota.Size > 0 {
		// if xfs is not the backing fs then error out if the storage-opt overlay2.size is used.
		return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs)
	}

	// figure out whether "index=off" option is recognized by the kernel
	_, err = os.Stat("/sys/module/overlay/parameters/index")
	switch {
	case err == nil:
		indexOff = "index=off,"
	case os.IsNotExist(err):
		// old kernel, no index -- do nothing
	default:
		logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err)
	}

	// NOTE(review): a detection error is deliberately non-fatal here —
	// we fall back to mounting without "userxattr".
	needsUserXattr, err := overlayutils.NeedsUserXAttr(home)
	if err != nil {
		logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err)
	}
	if needsUserXattr {
		userxattr = "userxattr,"
	}

	logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q",
		backingFs, projectQuotaSupported, indexOff, userxattr)

	return d, nil
}

// parseOptions parses the driver's storage-opts ("overlay2.*" keys) into an
// overlayOptions. Unknown keys are rejected.
func parseOptions(options []string) (*overlayOptions, error) {
	o := &overlayOptions{}
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {
		case "overlay2.override_kernel_check":
			o.overrideKernelCheck, err = strconv.ParseBool(val)
			if err != nil {
				return nil, err
			}
		case "overlay2.size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return nil, err
			}
			o.quota.Size = uint64(size)
		default:
			return nil, fmt.Errorf("overlay2: unknown option %s", key)
		}
	}
	return o, nil
}

// useNaiveDiff reports whether the driver must fall back to the naive
// (walk-and-compare) diff implementation. The probe runs once per process.
func useNaiveDiff(home string) bool {
	useNaiveDiffLock.Do(func() {
		if err := doesSupportNativeDiff(home); err != nil {
			logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err)
			useNaiveDiffOnly = true
		}
	})
	return useNaiveDiffOnly
}

func (d *Driver) String() string {
	return driverName
}

// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation.
func (d *Driver) Status() [][2]string {
	return [][2]string{
		{"Backing Filesystem", backingFs},
		{"Supports d_type", strconv.FormatBool(d.supportsDType)},
		{"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))},
		{"userxattr", strconv.FormatBool(userxattr != "")},
	}
}

// GetMetadata returns metadata about the overlay driver such as the LowerDir,
// UpperDir, WorkDir, and MergeDir used to store data.
func (d *Driver) GetMetadata(id string) (map[string]string, error) {
	dir := d.dir(id)
	if _, err := os.Stat(dir); err != nil {
		return nil, err
	}

	metadata := map[string]string{
		"WorkDir":   path.Join(dir, workDirName),
		"MergedDir": path.Join(dir, mergedDirName),
		"UpperDir":  path.Join(dir, diffDirName),
	}

	lowerDirs, err := d.getLowerDirs(id)
	if err != nil {
		return nil, err
	}
	if len(lowerDirs) > 0 {
		metadata["LowerDir"] = strings.Join(lowerDirs, ":")
	}

	return metadata, nil
}

// Cleanup any state created by overlay which should be cleaned when daemon
// is being shutdown. For now, we just have to unmount the bind mounted
// we had created.
func (d *Driver) Cleanup() error {
	return mount.RecursiveUnmount(d.home)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
	if opts == nil {
		opts = &graphdriver.CreateOpts{
			StorageOpt: make(map[string]string),
		}
	} else if opts.StorageOpt == nil {
		opts.StorageOpt = make(map[string]string)
	}

	// Merge daemon default config.
	if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 {
		opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10)
	}

	if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported {
		return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option")
	}

	return d.create(id, parent, opts)
}

// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
	if opts != nil && len(opts.StorageOpt) != 0 {
		// A size option only makes sense for the writable container layer,
		// which goes through CreateReadWrite instead.
		if _, ok := opts.StorageOpt["size"]; ok {
			return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
		}
	}
	return d.create(id, parent, opts)
}

// create builds the on-disk layout for layer id: the layer dir, its "diff"
// dir, a short symlink under linkDir, the "link" file, and — when a parent
// exists — the "work" dir and the "lower" chain file.
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
	dir := d.dir(id)

	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
	if err != nil {
		return err
	}
	root := idtools.Identity{UID: rootUID, GID: rootGID}
	current := idtools.CurrentIdentity()

	if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil {
		return err
	}
	if err := idtools.MkdirAndChown(dir, 0701, current); err != nil {
		return err
	}

	defer func() {
		// Clean up on failure
		if retErr != nil {
			os.RemoveAll(dir)
		}
	}()

	if opts != nil && len(opts.StorageOpt) > 0 {
		driver := &Driver{}
		if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
			return err
		}

		if driver.options.quota.Size > 0 {
			// Set container disk quota limit
			if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
				return err
			}
		}
	}

	if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil {
		return err
	}

	lid := overlayutils.GenerateID(idLength, logger)
	if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil {
		return err
	}

	// Write link id to link file
	if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
		return err
	}

	// if no parent directory, done
	if parent == "" {
		return nil
	}

	if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil {
		return err
	}

	// Mark the parent as committed: it now has a child and becomes read-only
	// (Get mounts committed layers without an upperdir).
	if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil {
		return err
	}

	lower, err := d.getLower(parent)
	if err != nil {
		return err
	}
	if lower != "" {
		if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
			return err
		}
	}

	return nil
}

// Parse overlay storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
	// Read size to set the disk project quota per container
	for key, val := range storageOpt {
		key := strings.ToLower(key)
		switch key {
		case "size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return err
			}
			driver.options.quota.Size = uint64(size)
		default:
			return fmt.Errorf("Unknown option %s", key)
		}
	}

	return nil
}

// getLower returns the ":"-separated lower-layer chain for a new child of
// parent: the parent's own link name followed by the parent's lower chain.
func (d *Driver) getLower(parent string) (string, error) {
	parentDir := d.dir(parent)

	// Ensure parent exists
	if _, err := os.Lstat(parentDir); err != nil {
		return "", err
	}

	// Read Parent link file
	parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
	if err != nil {
		return "", err
	}
	lowers := []string{path.Join(linkDir, string(parentLink))}

	// A missing lower file just means the parent is a base layer;
	// other read errors are deliberately ignored here as well.
	parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
	if err == nil {
		parentLowers := strings.Split(string(parentLower), ":")
		lowers = append(lowers, parentLowers...)
	}
	if len(lowers) > maxDepth {
		return "", errors.New("max depth exceeded")
	}
	return strings.Join(lowers, ":"), nil
}

// dir returns the layer's directory under the driver home.
func (d *Driver) dir(id string) string {
	return path.Join(d.home, id)
}

// getLowerDirs resolves the layer's "lower" chain of linkDir symlinks into
// absolute, cleaned paths. Returns nil (no error) when the layer has no
// lower file, i.e. it is a base layer.
func (d *Driver) getLowerDirs(id string) ([]string, error) {
	var lowersArray []string
	lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
	if err == nil {
		for _, s := range strings.Split(string(lowers), ":") {
			lp, err := os.Readlink(path.Join(d.home, s))
			if err != nil {
				return nil, err
			}
			lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp)))
		}
	} else if !os.IsNotExist(err) {
		return nil, err
	}
	return lowersArray, nil
}

// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
	if id == "" {
		return fmt.Errorf("refusing to remove the directories: id is empty")
	}
	d.locker.Lock(id)
	defer d.locker.Unlock(id)
	dir := d.dir(id)
	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
	if err == nil {
		if len(lid) == 0 {
			logger.Errorf("refusing to remove empty link for layer %v", id)
		} else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
			// Best effort: a stale symlink is harmless, so only log.
			logger.Debugf("Failed to remove link: %v", err)
		}
	}

	if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) {
	d.locker.Lock(id)
	defer d.locker.Unlock(id)
	dir := d.dir(id)
	if _, err := os.Stat(dir); err != nil {
		return nil, err
	}

	diffDir := path.Join(dir, diffDirName)
	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
	if err != nil {
		// If no lower, just return diff directory
		if os.IsNotExist(err) {
			return containerfs.NewLocalContainerFS(diffDir), nil
		}
		return nil, err
	}

	// Reference-count the mount so concurrent Get calls share one overlay.
	mergedDir := path.Join(dir, mergedDirName)
	if count := d.ctr.Increment(mergedDir); count > 1 {
		return containerfs.NewLocalContainerFS(mergedDir), nil
	}
	defer func() {
		if retErr != nil {
			if c := d.ctr.Decrement(mergedDir); c <= 0 {
				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
					logger.Errorf("error unmounting %v: %v", mergedDir, mntErr)
				}
				// Cleanup the created merged directory; see the comment in Put's rmdir
				if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) {
					logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err)
				}
			}
		}
	}()

	workDir := path.Join(dir, workDirName)
	splitLowers := strings.Split(string(lowers), ":")
	absLowers := make([]string, len(splitLowers))
	for i, s := range splitLowers {
		absLowers[i] = path.Join(d.home, s)
	}
	// A layer with children ("committed" marker) is mounted read-only:
	// its own diff dir becomes the topmost lowerdir instead of the upperdir.
	var readonly bool
	if _, err := os.Stat(path.Join(dir, "committed")); err == nil {
		readonly = true
	} else if !os.IsNotExist(err) {
		return nil, err
	}

	var opts string
	if readonly {
		opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":")
	} else {
		opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir
	}

	mountData := label.FormatMountLabel(opts, mountLabel)
	mount := unix.Mount
	mountTarget := mergedDir

	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil {
		return nil, err
	}

	pageSize := unix.Getpagesize()

	// Use relative paths and mountFrom when the mount data has exceeded
	// the page size. The mount syscall fails if the mount data cannot
	// fit within a page and relative links make the mount data much
	// smaller at the expense of requiring a fork exec to chroot.
	if len(mountData) > pageSize-1 {
		if readonly {
			opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers)
		} else {
			opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName)
		}
		mountData = label.FormatMountLabel(opts, mountLabel)
		if len(mountData) > pageSize-1 {
			return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
		}

		mount = func(source string, target string, mType string, flags uintptr, label string) error {
			return mountFrom(d.home, source, target, mType, flags, label)
		}
		mountTarget = path.Join(id, mergedDirName)
	}

	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
	}

	if !readonly {
		// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
		// user namespace requires this to move a directory from lower to upper.
		if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil {
			return nil, err
		}
	}

	return containerfs.NewLocalContainerFS(mergedDir), nil
}

// Put unmounts the mount path created for the given id.
// It also removes the 'merged' directory to force the kernel to unmount the
// overlay mount in other namespaces.
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool {
	// A layer exists iff its directory under the driver home is present.
	_, err := os.Stat(d.dir(id))
	return err == nil
}

// isParent determines whether the given parent is the direct parent of the
// given layer id
func (d *Driver) isParent(id, parent string) bool {
	lowers, err := d.getLowerDirs(id)
	if err != nil {
		return false
	}
	// A layer with lowers cannot have an empty parent.
	if parent == "" && len(lowers) > 0 {
		return false
	}

	parentDir := d.dir(parent)
	var ld string
	if len(lowers) > 0 {
		// The first lower entry is the direct parent's diff dir; its
		// containing directory is the parent's layer dir.
		ld = filepath.Dir(lowers[0])
	}
	// Base layer with no lowers and no claimed parent.
	if ld == "" && parent == "" {
		return true
	}
	return ld == parentDir
}

// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
	// Fall back to the naive differ when applying onto a non-direct parent.
	if !d.isParent(id, parent) {
		return d.naiveDiff.ApplyDiff(id, parent, diff)
	}

	applyDir := d.getDiffPath(id)

	logger.Debugf("Applying tar in %s", applyDir)
	// Overlay doesn't need the parent id to apply the diff
	if err := untar(diff, applyDir, &archive.TarOptions{
		UIDMaps:        d.uidMaps,
		GIDMaps:        d.gidMaps,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
		InUserNS:       sys.RunningInUserNS(),
	}); err != nil {
		return 0, err
	}

	return directory.Size(context.TODO(), applyDir)
}

// getDiffPath returns the path of the layer's writable "diff" directory.
func (d *Driver) getDiffPath(id string) string {
	dir := d.dir(id)
	return path.Join(dir, diffDirName)
}

// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
		return d.naiveDiff.DiffSize(id, parent)
	}
	return directory.Size(context.TODO(), d.getDiffPath(id))
}

// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
	// Native diff only works against the direct parent; otherwise
	// delegate to the naive (walk-and-compare) implementation.
	if useNaiveDiff(d.home) || !d.isParent(id, parent) {
		return d.naiveDiff.Diff(id, parent)
	}

	diffPath := d.getDiffPath(id)
	logger.Debugf("Tar with options on %s", diffPath)
	return archive.TarWithOptions(diffPath, &archive.TarOptions{
		Compression:    archive.Uncompressed,
		UIDMaps:        d.uidMaps,
		GIDMaps:        d.gidMaps,
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
	})
}

// Changes produces a list of changes between the specified layer and its
// parent layer. If parent is "", then all changes will be ADD changes.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
	return d.naiveDiff.Changes(id, parent)
}
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
Hm... good one; "generally" I'd say "don't duplicate", but in this case; some thoughts; - the code currently lives in containerd's overlay snapshotter code (which we _don't_ need) - _if_ we want to use the containerd code, I'd prefer it to be refactored to be in (e.g.) `containerd/containerd/snapshots/overlay/utils` (or `overlayutils`) - in that case, we should also check what the overlap is between our "overlayutils" and the "containerd" one; for example, [`supportsMultipleLowerDir()`](https://github.com/containerd/containerd/blob/ddf6594fbeeddf30a55e0d2bcdb85956ec792fd2/snapshots/overlay/check.go#L34-L41) looks to be a fork of our `overlayutils` package - (this depends a bit on how 'stable' that part of the code is, because the downside of depending on it from containerd means that if we need a fix in that code, we need to update the _whole_ containerd dependency (which could bring a lot of additional / updated dependencies due to how it's structured) From the above, I'm tempted to say; 1. Start with copying the check to our own `graphdriver/overlayutils` package 2. After that (follow-up), refactor containerd to have it in a `(overlay)utils` package 3. Look at the overlap between our `overlayutils` and containerd's utils, and consider replacing our overlayutils with the one from containerd
thaJeztah
4,933
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
@cpuguy83 @tonistiigi WDYT?
thaJeztah
4,934
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
updated
AkihiroSuda
4,935
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower"
// file and on mount. The links are used to shorten the total length
// of a layer reference without requiring changes to the layer identifier
// or root directory. Mounts are always done relative to root and
// referencing the symbolic links in order to ensure the number of
// lower directories can fit in a single page for making the mount
// syscall. A hard upper limit of 128 lower layers is enforced to ensure
// that mounts do not fail due to length.

const (
	driverName    = "overlay2"
	linkDir       = "l"      // directory of short symlinks to each layer's "diff" dir
	diffDirName   = "diff"   // upper (writable) layer contents
	workDirName   = "work"   // overlayfs scratch space
	mergedDirName = "merged" // mount point of the assembled overlay
	lowerFile     = "lower"  // colon-separated list of lower-layer links
	maxDepth      = 128      // hard cap on lower layers (see comment above)

	// idLength represents the number of random characters
	// which can be used to create the unique link identifier
	// for every layer. If this value is too long then the
	// page size limit for the mount command may be exceeded.
	// The idLength should be selected such that following equation
	// is true (512 is a buffer for label metadata).
	// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
	idLength = 26
)

// overlayOptions holds driver options parsed from the daemon's storage-opts.
type overlayOptions struct {
	// overrideKernelCheck is parsed in parseOptions but not read in this
	// file — presumably retained for backward compatibility; TODO confirm.
	overrideKernelCheck bool
	quota               quota.Quota // default per-layer disk quota ("overlay2.size")
}

// Driver contains information about the home directory and the list of active
// mounts that are created using this driver.
type Driver struct {
	home          string // root of the overlay2 storage area
	uidMaps       []idtools.IDMap
	gidMaps       []idtools.IDMap
	ctr           *graphdriver.RefCounter // reference counts for active "merged" mounts
	quotaCtl      *quota.Control          // non-nil only when xfs project quota was enabled in Init
	options       overlayOptions
	naiveDiff     graphdriver.DiffDriver // fallback walk-and-compare diff implementation
	supportsDType bool
	locker        *locker.Locker // per-layer-id locking for Get/Put/Remove
}

var (
	logger = logrus.WithField("storage-driver", "overlay2")

	// Populated once by Init.
	backingFs             = "<unknown>"
	projectQuotaSupported = false

	// Guard the one-time native-diff capability probe (see useNaiveDiff).
	useNaiveDiffLock sync.Once
	useNaiveDiffOnly bool

	// indexOff is "index=off," when the running kernel recognizes the
	// overlay "index" parameter, otherwise ""; prepended to mount options.
	indexOff string
)

// init registers this driver under "overlay2" with the graphdriver registry.
func init() {
	graphdriver.Register(driverName, Init)
}

// Init returns the native diff driver for overlay filesystem.
// If overlay filesystem is not supported on the host, the error
// graphdriver.ErrNotSupported is returned.
// If an overlay filesystem is not supported over an existing filesystem then
// the error graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	opts, err := parseOptions(options)
	if err != nil {
		return nil, err
	}

	// Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory.
	// This covers situations where /var/lib/docker/overlay2 is a mount, and on a different
	// filesystem than /var/lib/docker.
	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
	testdir := home
	if _, err := os.Stat(testdir); os.IsNotExist(err) {
		testdir = filepath.Dir(testdir)
	}

	if err := overlayutils.SupportsOverlay(testdir, true); err != nil {
		logger.Error(err)
		return nil, graphdriver.ErrNotSupported
	}

	// Record the backing filesystem name for Status reporting.
	fsMagic, err := graphdriver.GetFSMagic(testdir)
	if err != nil {
		return nil, err
	}
	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
		backingFs = fsName
	}

	supportsDType, err := fsutils.SupportsDType(testdir)
	if err != nil {
		return nil, err
	}
	if !supportsDType {
		if !graphdriver.IsInitialized(home) {
			return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs)
		}
		// allow running without d_type only for existing setups (#27443)
		logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs))
	}

	// The "l" directory holds the short symlinks used to keep mount data
	// under the one-page limit (see the file header comment).
	if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil {
		return nil, err
	}

	d := &Driver{
		home:          home,
		uidMaps:       uidMaps,
		gidMaps:       gidMaps,
		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
		supportsDType: supportsDType,
		locker:        locker.New(),
		options:       *opts,
	}

	d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps)

	if backingFs == "xfs" {
		// Try to enable project quota support over xfs.
		if d.quotaCtl, err = quota.NewControl(home); err == nil {
			projectQuotaSupported = true
		} else if opts.quota.Size > 0 {
			return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err)
		}
	} else if opts.quota.Size > 0 {
		// if xfs is not the backing fs then error out if the storage-opt overlay2.size is used.
		return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs)
	}

	// figure out whether "index=off" option is recognized by the kernel
	_, err = os.Stat("/sys/module/overlay/parameters/index")
	switch {
	case err == nil:
		indexOff = "index=off,"
	case os.IsNotExist(err):
		// old kernel, no index -- do nothing
	default:
		logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err)
	}

	logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff)

	return d, nil
}

// parseOptions converts the daemon's "key=value" storage-opt strings into an
// overlayOptions struct; unknown keys are rejected with an error.
func parseOptions(options []string) (*overlayOptions, error) {
	o := &overlayOptions{}
	for _, option := range options {
		key, val, err := parsers.ParseKeyValueOpt(option)
		if err != nil {
			return nil, err
		}
		key = strings.ToLower(key)
		switch key {
		case "overlay2.override_kernel_check":
			o.overrideKernelCheck, err = strconv.ParseBool(val)
			if err != nil {
				return nil, err
			}
		case "overlay2.size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return nil, err
			}
			o.quota.Size = uint64(size)
		default:
			return nil, fmt.Errorf("overlay2: unknown option %s", key)
		}
	}
	return o, nil
}

// useNaiveDiff reports whether the driver must fall back to the naive
// (walk-and-compare) diff implementation. The capability probe runs at most
// once per process and the result is cached.
func useNaiveDiff(home string) bool {
	useNaiveDiffLock.Do(func() {
		if err := doesSupportNativeDiff(home); err != nil {
			logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err)
			useNaiveDiffOnly = true
		}
	})
	return useNaiveDiffOnly
}

// String returns the driver name ("overlay2").
func (d *Driver) String() string {
	return driverName
}

// Status returns current driver information in a two dimensional string array.
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
	// Read-only layers never carry a per-layer quota; reject "size" here.
	if opts != nil && len(opts.StorageOpt) != 0 {
		if _, ok := opts.StorageOpt["size"]; ok {
			return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
		}
	}
	return d.create(id, parent, opts)
}

// create builds the on-disk layout for layer id: the layer dir, its "diff"
// (upper) dir, a short symlink under "l", the "link" file naming that symlink,
// and — when a parent exists — the "work" dir and the "lower" chain file.
// Any partially created state is removed on failure via the deferred cleanup.
func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
	dir := d.dir(id)

	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
	if err != nil {
		return err
	}
	root := idtools.Identity{UID: rootUID, GID: rootGID}
	current := idtools.CurrentIdentity()

	if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil {
		return err
	}
	if err := idtools.MkdirAndChown(dir, 0701, current); err != nil {
		return err
	}

	defer func() {
		// Clean up on failure
		if retErr != nil {
			os.RemoveAll(dir)
		}
	}()

	if opts != nil && len(opts.StorageOpt) > 0 {
		driver := &Driver{}
		if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil {
			return err
		}

		if driver.options.quota.Size > 0 {
			// Set container disk quota limit
			if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil {
				return err
			}
		}
	}

	if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil {
		return err
	}

	// Create the short symlink "l/<lid>" -> "../<id>/diff" used to keep
	// mount option strings within a single page (see file header comment).
	lid := overlayutils.GenerateID(idLength, logger)
	if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil {
		return err
	}

	// Write link id to link file
	if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
		return err
	}

	// if no parent directory, done
	if parent == "" {
		return nil
	}

	if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil {
		return err
	}

	// Mark the parent as committed (read-only from now on); Get mounts
	// committed layers without an upperdir.
	if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil {
		return err
	}

	lower, err := d.getLower(parent)
	if err != nil {
		return err
	}
	if lower != "" {
		if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
			return err
		}
	}

	return nil
}

// Parse overlay storage options
func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error {
	// Read size to set the disk project quota per container
	for key, val := range storageOpt {
		key := strings.ToLower(key)
		switch key {
		case "size":
			size, err := units.RAMInBytes(val)
			if err != nil {
				return err
			}
			driver.options.quota.Size = uint64(size)
		default:
			return fmt.Errorf("Unknown option %s", key)
		}
	}

	return nil
}

// getLower returns the colon-separated lower-layer chain for a child of
// parent: the parent's own short link followed by the parent's lowers.
func (d *Driver) getLower(parent string) (string, error) {
	parentDir := d.dir(parent)

	// Ensure parent exists
	if _, err := os.Lstat(parentDir); err != nil {
		return "", err
	}

	// Read parent link file
	parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
	if err != nil {
		return "", err
	}
	lowers := []string{path.Join(linkDir, string(parentLink))}

	// A missing "lower" file just means the parent is a base layer.
	parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
	if err == nil {
		parentLowers := strings.Split(string(parentLower), ":")
		lowers = append(lowers, parentLowers...)
	}
	if len(lowers) > maxDepth {
		return "", errors.New("max depth exceeded")
	}
	return strings.Join(lowers, ":"), nil
}

// dir returns the on-disk directory for layer id.
func (d *Driver) dir(id string) string {
	return path.Join(d.home, id)
}

// getLowerDirs resolves the layer's "lower" file into absolute, cleaned
// paths by following each short symlink under "l". A missing "lower" file
// yields an empty (nil) slice, not an error.
func (d *Driver) getLowerDirs(id string) ([]string, error) {
	var lowersArray []string
	lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
	if err == nil {
		for _, s := range strings.Split(string(lowers), ":") {
			lp, err := os.Readlink(path.Join(d.home, s))
			if err != nil {
				return nil, err
			}
			lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp)))
		}
	} else if !os.IsNotExist(err) {
		return nil, err
	}
	return lowersArray, nil
}

// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
	if id == "" {
		return fmt.Errorf("refusing to remove the directories: id is empty")
	}
	d.locker.Lock(id)
	defer d.locker.Unlock(id)
	dir := d.dir(id)
	// Best-effort removal of the short symlink under "l"; failures are
	// only logged because the layer dir removal below is what matters.
	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
	if err == nil {
		if len(lid) == 0 {
			logger.Errorf("refusing to remove empty link for layer %v", id)
		} else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
			logger.Debugf("Failed to remove link: %v", err)
		}
	}

	if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) {
	d.locker.Lock(id)
	defer d.locker.Unlock(id)
	dir := d.dir(id)
	if _, err := os.Stat(dir); err != nil {
		return nil, err
	}

	diffDir := path.Join(dir, diffDirName)
	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
	if err != nil {
		// If no lower, just return diff directory
		if os.IsNotExist(err) {
			return containerfs.NewLocalContainerFS(diffDir), nil
		}
		return nil, err
	}

	// Reference-count the merged mount: if it is already mounted, reuse it.
	mergedDir := path.Join(dir, mergedDirName)
	if count := d.ctr.Increment(mergedDir); count > 1 {
		return containerfs.NewLocalContainerFS(mergedDir), nil
	}
	defer func() {
		// On failure, drop our reference and undo the mount if we were last.
		if retErr != nil {
			if c := d.ctr.Decrement(mergedDir); c <= 0 {
				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
					logger.Errorf("error unmounting %v: %v", mergedDir, mntErr)
				}
				// Cleanup the created merged directory; see the comment in Put's rmdir
				if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) {
					logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err)
				}
			}
		}
	}()

	workDir := path.Join(dir, workDirName)
	splitLowers := strings.Split(string(lowers), ":")
	absLowers := make([]string, len(splitLowers))
	for i, s := range splitLowers {
		absLowers[i] = path.Join(d.home, s)
	}
	// Layers marked "committed" (see create) are mounted read-only:
	// their diff dir joins the lowerdir chain instead of being an upperdir.
	var readonly bool
	if _, err := os.Stat(path.Join(dir, "committed")); err == nil {
		readonly = true
	} else if !os.IsNotExist(err) {
		return nil, err
	}

	var opts string
	if readonly {
		opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":")
	} else {
		opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir
	}

	mountData := label.FormatMountLabel(opts, mountLabel)
	mount := unix.Mount
	mountTarget := mergedDir

	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
	if err != nil {
		return nil, err
	}
	if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil {
		return nil, err
	}

	pageSize := unix.Getpagesize()

	// Use relative paths and mountFrom when the mount data has exceeded
	// the page size. The mount syscall fails if the mount data cannot
	// fit within a page and relative links make the mount data much
	// smaller at the expense of requiring a fork exec to chroot.
	if len(mountData) > pageSize-1 {
		if readonly {
			opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers)
		} else {
			opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName)
		}
		mountData = label.FormatMountLabel(opts, mountLabel)
		if len(mountData) > pageSize-1 {
			return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
		}

		mount = func(source string, target string, mType string, flags uintptr, label string) error {
			return mountFrom(d.home, source, target, mType, flags, label)
		}
		mountTarget = path.Join(id, mergedDirName)
	}

	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
	}

	if !readonly {
		// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
		// user namespace requires this to move a directory from lower to upper.
		if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil {
			return nil, err
		}
	}

	return containerfs.NewLocalContainerFS(mergedDir), nil
}

// Put unmounts the mount path created for the give id.
// It also removes the 'merged' directory to force the kernel to unmount the
// overlay mount in other namespaces.
func (d *Driver) Put(id string) error {
	d.locker.Lock(id)
	defer d.locker.Unlock(id)
	dir := d.dir(id)
	_, err := ioutil.ReadFile(path.Join(dir, lowerFile))
	if err != nil {
		// If no lower, no mount happened and just return directly
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	// Only the last reference actually unmounts.
	mountpoint := path.Join(dir, mergedDirName)
	if count := d.ctr.Decrement(mountpoint); count > 0 {
		return nil
	}
	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
		logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
	}
	// Remove the mountpoint here. Removing the mountpoint (in newer kernels)
	// will cause all other instances of this mount in other mount namespaces
	// to be unmounted. This is necessary to avoid cases where an overlay mount
	// that is present in another namespace will cause subsequent mounts
	// operations to fail with ebusy.  We ignore any errors here because this may
	// fail on older kernels which don't have
	// torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.
	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
		logger.Debugf("Failed to remove %s overlay: %v", id, err)
	}
	return nil
}

// Exists checks to see if the id is already mounted.
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
Can we remove this from the `init`?
cpuguy83
4,936
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
This Debugf is not new in this PR.
AkihiroSuda
4,937
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
I'm personally ok with keeping this (for now) as it's not new indeed. Looking at the code, I also want to do some refactoring after this is merged; the string-concatting code is becoming a bit messy and, assuming that won't affect performance, I'd like to use a `[]string` for these options, and `strings.Join()` them at the end (I think that will make the code more readable).
thaJeztah
4,938
moby/moby
42,068
overlay2: support "userxattr" option (kernel 5.11)
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fix #42055 "[kernel 5.11 + overlay2 + rootless] : apt-get fails with `Invalid cross-device link`" **- How I did it** Support "userxattr" option (kernel 5.11). The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount the overlayfs in a user namespace without the "userxattr" option. The corresponding kernel commit: torvalds/linux@2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 > **ovl: user xattr** > > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." > ... > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. Related to containerd/containerd#5076 **- How to verify it** - Install Ubuntu 20.10 - Install mainline kernel 5.11 https://kernel.ubuntu.com/~kernel-ppa/mainline/v5.11/amd64/ - Launch rootless dockerd with `overlay2` storage driver. - Make sure `docker --context=rootless run -it --rm ubuntu sh -ec "apt-get update && apt-get install -y sl"` succeeds without an error (#42055). 
**- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> overlay2: support "userxattr" option (kernel 5.11) **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-24 09:58:38+00:00
2021-03-18 18:54:01+00:00
daemon/graphdriver/overlay2/overlay.go
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q", backingFs, projectQuotaSupported, indexOff) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. 
// Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. 
// The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := 
ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) } if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. 
func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = 
true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. 
if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
// +build linux package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" import ( "context" "errors" "fmt" "io" "io/ioutil" "os" "path" "path/filepath" "strconv" "strings" "sync" "github.com/containerd/containerd/sys" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlayutils" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/containerfs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/fsutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/pkg/system" "github.com/docker/docker/quota" units "github.com/docker/go-units" "github.com/moby/locker" "github.com/moby/sys/mount" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) var ( // untar defines the untar method untar = chrootarchive.UntarUncompressed ) // This backend uses the overlay union filesystem for containers // with diff directories for each layer. // This version of the overlay driver requires at least kernel // 4.0.0 in order to support mounting multiple diff directories. // Each container/image has at least a "diff" directory and "link" file. // If there is also a "lower" file when there are diff layers // below as well as "merged" and "work" directories. The "diff" directory // has the upper layer of the overlay and is used to capture any // changes to the layer. The "lower" file contains all the lower layer // mounts separated by ":" and ordered from uppermost to lowermost // layers. The overlay itself is mounted in the "merged" directory, // and the "work" dir is needed for overlay to work. // The "link" file for each layer contains a unique string for the layer. // Under the "l" directory at the root there will be a symbolic link // with that unique string pointing the "diff" directory for the layer. 
// The symbolic links are used to reference lower layers in the "lower" // file and on mount. The links are used to shorten the total length // of a layer reference without requiring changes to the layer identifier // or root directory. Mounts are always done relative to root and // referencing the symbolic links in order to ensure the number of // lower directories can fit in a single page for making the mount // syscall. A hard upper limit of 128 lower layers is enforced to ensure // that mounts do not fail due to length. const ( driverName = "overlay2" linkDir = "l" diffDirName = "diff" workDirName = "work" mergedDirName = "merged" lowerFile = "lower" maxDepth = 128 // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the // page size limit for the mount command may be exceeded. // The idLength should be selected such that following equation // is true (512 is a buffer for label metadata). // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) idLength = 26 ) type overlayOptions struct { overrideKernelCheck bool quota quota.Quota } // Driver contains information about the home directory and the list of active // mounts that are created using this driver. type Driver struct { home string uidMaps []idtools.IDMap gidMaps []idtools.IDMap ctr *graphdriver.RefCounter quotaCtl *quota.Control options overlayOptions naiveDiff graphdriver.DiffDriver supportsDType bool locker *locker.Locker } var ( logger = logrus.WithField("storage-driver", "overlay2") backingFs = "<unknown>" projectQuotaSupported = false useNaiveDiffLock sync.Once useNaiveDiffOnly bool indexOff string userxattr string ) func init() { graphdriver.Register(driverName, Init) } // Init returns the native diff driver for overlay filesystem. // If overlay filesystem is not supported on the host, the error // graphdriver.ErrNotSupported is returned. 
// If an overlay filesystem is not supported over an existing filesystem then // the error graphdriver.ErrIncompatibleFS is returned. func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { opts, err := parseOptions(options) if err != nil { return nil, err } // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different // filesystem than /var/lib/docker. // If the path does not exist, fall back to using /var/lib/docker for feature detection. testdir := home if _, err := os.Stat(testdir); os.IsNotExist(err) { testdir = filepath.Dir(testdir) } if err := overlayutils.SupportsOverlay(testdir, true); err != nil { logger.Error(err) return nil, graphdriver.ErrNotSupported } fsMagic, err := graphdriver.GetFSMagic(testdir) if err != nil { return nil, err } if fsName, ok := graphdriver.FsNames[fsMagic]; ok { backingFs = fsName } supportsDType, err := fsutils.SupportsDType(testdir) if err != nil { return nil, err } if !supportsDType { if !graphdriver.IsInitialized(home) { return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) } // allow running without d_type only for existing setups (#27443) logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) } if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0701, idtools.CurrentIdentity()); err != nil { return nil, err } d := &Driver{ home: home, uidMaps: uidMaps, gidMaps: gidMaps, ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), supportsDType: supportsDType, locker: locker.New(), options: *opts, } d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) if backingFs == "xfs" { // Try to enable project quota support over xfs. 
if d.quotaCtl, err = quota.NewControl(home); err == nil { projectQuotaSupported = true } else if opts.quota.Size > 0 { return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. Found %v", backingFs) } // figure out whether "index=off" option is recognized by the kernel _, err = os.Stat("/sys/module/overlay/parameters/index") switch { case err == nil: indexOff = "index=off," case os.IsNotExist(err): // old kernel, no index -- do nothing default: logger.Warnf("Unable to detect whether overlay kernel module supports index parameter: %s", err) } needsUserXattr, err := overlayutils.NeedsUserXAttr(home) if err != nil { logger.Warnf("Unable to detect whether overlay kernel module needs \"userxattr\" parameter: %s", err) } if needsUserXattr { userxattr = "userxattr," } logger.Debugf("backingFs=%s, projectQuotaSupported=%v, indexOff=%q, userxattr=%q", backingFs, projectQuotaSupported, indexOff, userxattr) return d, nil } func parseOptions(options []string) (*overlayOptions, error) { o := &overlayOptions{} for _, option := range options { key, val, err := parsers.ParseKeyValueOpt(option) if err != nil { return nil, err } key = strings.ToLower(key) switch key { case "overlay2.override_kernel_check": o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } case "overlay2.size": size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) default: return nil, fmt.Errorf("overlay2: unknown option %s", key) } } return o, nil } func useNaiveDiff(home string) bool { useNaiveDiffLock.Do(func() { if err := doesSupportNativeDiff(home); err != nil { logger.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: 
%v", err) useNaiveDiffOnly = true } }) return useNaiveDiffOnly } func (d *Driver) String() string { return driverName } // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. func (d *Driver) Status() [][2]string { return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, {"userxattr", strconv.FormatBool(userxattr != "")}, } } // GetMetadata returns metadata about the overlay driver such as the LowerDir, // UpperDir, WorkDir, and MergeDir used to store data. func (d *Driver) GetMetadata(id string) (map[string]string, error) { dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } metadata := map[string]string{ "WorkDir": path.Join(dir, workDirName), "MergedDir": path.Join(dir, mergedDirName), "UpperDir": path.Join(dir, diffDirName), } lowerDirs, err := d.getLowerDirs(id) if err != nil { return nil, err } if len(lowerDirs) > 0 { metadata["LowerDir"] = strings.Join(lowerDirs, ":") } return metadata, nil } // Cleanup any state created by overlay which should be cleaned when daemon // is being shutdown. For now, we just have to unmount the bind mounted // we had created. func (d *Driver) Cleanup() error { return mount.RecursiveUnmount(d.home) } // CreateReadWrite creates a layer that is writable for use as a container // file system. func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { if opts == nil { opts = &graphdriver.CreateOpts{ StorageOpt: make(map[string]string), } } else if opts.StorageOpt == nil { opts.StorageOpt = make(map[string]string) } // Merge daemon default config. 
if _, ok := opts.StorageOpt["size"]; !ok && d.options.quota.Size != 0 { opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) } if _, ok := opts.StorageOpt["size"]; ok && !projectQuotaSupported { return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") } return d.create(id, parent, opts) } // Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. // The parent filesystem is used to configure these directories for the overlay. func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { if opts != nil && len(opts.StorageOpt) != 0 { if _, ok := opts.StorageOpt["size"]; ok { return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") } } return d.create(id, parent, opts) } func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { dir := d.dir(id) rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return err } root := idtools.Identity{UID: rootUID, GID: rootGID} current := idtools.CurrentIdentity() if err := idtools.MkdirAllAndChown(path.Dir(dir), 0701, current); err != nil { return err } if err := idtools.MkdirAndChown(dir, 0701, current); err != nil { return err } defer func() { // Clean up on failure if retErr != nil { os.RemoveAll(dir) } }() if opts != nil && len(opts.StorageOpt) > 0 { driver := &Driver{} if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { return err } if driver.options.quota.Size > 0 { // Set container disk quota limit if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { return err } } } if err := idtools.MkdirAndChown(path.Join(dir, diffDirName), 0755, root); err != nil { return err } lid := overlayutils.GenerateID(idLength, logger) if err := os.Symlink(path.Join("..", id, diffDirName), path.Join(d.home, linkDir, lid)); err != nil { return err } // Write link id to link file if err := 
ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { return err } // if no parent directory, done if parent == "" { return nil } if err := idtools.MkdirAndChown(path.Join(dir, workDirName), 0700, root); err != nil { return err } if err := ioutil.WriteFile(path.Join(d.dir(parent), "committed"), []byte{}, 0600); err != nil { return err } lower, err := d.getLower(parent) if err != nil { return err } if lower != "" { if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { return err } } return nil } // Parse overlay storage options func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { // Read size to set the disk project quota per container for key, val := range storageOpt { key := strings.ToLower(key) switch key { case "size": size, err := units.RAMInBytes(val) if err != nil { return err } driver.options.quota.Size = uint64(size) default: return fmt.Errorf("Unknown option %s", key) } } return nil } func (d *Driver) getLower(parent string) (string, error) { parentDir := d.dir(parent) // Ensure parent exists if _, err := os.Lstat(parentDir); err != nil { return "", err } // Read Parent link fileA parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) if err != nil { return "", err } lowers := []string{path.Join(linkDir, string(parentLink))} parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) if err == nil { parentLowers := strings.Split(string(parentLower), ":") lowers = append(lowers, parentLowers...) 
} if len(lowers) > maxDepth { return "", errors.New("max depth exceeded") } return strings.Join(lowers, ":"), nil } func (d *Driver) dir(id string) string { return path.Join(d.home, id) } func (d *Driver) getLowerDirs(id string) ([]string, error) { var lowersArray []string lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) if err == nil { for _, s := range strings.Split(string(lowers), ":") { lp, err := os.Readlink(path.Join(d.home, s)) if err != nil { return nil, err } lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) } } else if !os.IsNotExist(err) { return nil, err } return lowersArray, nil } // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { if id == "" { return fmt.Errorf("refusing to remove the directories: id is empty") } d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) lid, err := ioutil.ReadFile(path.Join(dir, "link")) if err == nil { if len(lid) == 0 { logger.Errorf("refusing to remove empty link for layer %v", id) } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { logger.Debugf("Failed to remove link: %v", err) } } if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { return err } return nil } // Get creates and mounts the required file system for the given id and returns the mount path. 
func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) if _, err := os.Stat(dir); err != nil { return nil, err } diffDir := path.Join(dir, diffDirName) lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, just return diff directory if os.IsNotExist(err) { return containerfs.NewLocalContainerFS(diffDir), nil } return nil, err } mergedDir := path.Join(dir, mergedDirName) if count := d.ctr.Increment(mergedDir); count > 1 { return containerfs.NewLocalContainerFS(mergedDir), nil } defer func() { if retErr != nil { if c := d.ctr.Decrement(mergedDir); c <= 0 { if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { logger.Errorf("error unmounting %v: %v", mergedDir, mntErr) } // Cleanup the created merged directory; see the comment in Put's rmdir if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { logger.Debugf("Failed to remove %s: %v: %v", id, rmErr, err) } } } }() workDir := path.Join(dir, workDirName) splitLowers := strings.Split(string(lowers), ":") absLowers := make([]string, len(splitLowers)) for i, s := range splitLowers { absLowers[i] = path.Join(d.home, s) } var readonly bool if _, err := os.Stat(path.Join(dir, "committed")); err == nil { readonly = true } else if !os.IsNotExist(err) { return nil, err } var opts string if readonly { opts = indexOff + userxattr + "lowerdir=" + diffDir + ":" + strings.Join(absLowers, ":") } else { opts = indexOff + userxattr + "lowerdir=" + strings.Join(absLowers, ":") + ",upperdir=" + diffDir + ",workdir=" + workDir } mountData := label.FormatMountLabel(opts, mountLabel) mount := unix.Mount mountTarget := mergedDir rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) if err != nil { return nil, err } if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.Identity{UID: rootUID, GID: rootGID}); err != nil { return nil, err } pageSize := unix.Getpagesize() // Use 
relative paths and mountFrom when the mount data has exceeded // the page size. The mount syscall fails if the mount data cannot // fit within a page and relative links make the mount data much // smaller at the expense of requiring a fork exec to chroot. if len(mountData) > pageSize-1 { if readonly { opts = indexOff + userxattr + "lowerdir=" + path.Join(id, diffDirName) + ":" + string(lowers) } else { opts = indexOff + userxattr + "lowerdir=" + string(lowers) + ",upperdir=" + path.Join(id, diffDirName) + ",workdir=" + path.Join(id, workDirName) } mountData = label.FormatMountLabel(opts, mountLabel) if len(mountData) > pageSize-1 { return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) } mount = func(source string, target string, mType string, flags uintptr, label string) error { return mountFrom(d.home, source, target, mType, flags, label) } mountTarget = path.Join(id, mergedDirName) } if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) } if !readonly { // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a // user namespace requires this to move a directory from lower to upper. if err := os.Chown(path.Join(workDir, workDirName), rootUID, rootGID); err != nil { return nil, err } } return containerfs.NewLocalContainerFS(mergedDir), nil } // Put unmounts the mount path created for the give id. // It also removes the 'merged' directory to force the kernel to unmount the // overlay mount in other namespaces. 
func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) dir := d.dir(id) _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) if err != nil { // If no lower, no mount happened and just return directly if os.IsNotExist(err) { return nil } return err } mountpoint := path.Join(dir, mergedDirName) if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } // Remove the mountpoint here. Removing the mountpoint (in newer kernels) // will cause all other instances of this mount in other mount namespaces // to be unmounted. This is necessary to avoid cases where an overlay mount // that is present in another namespace will cause subsequent mounts // operations to fail with ebusy. We ignore any errors here because this may // fail on older kernels which don't have // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { logger.Debugf("Failed to remove %s overlay: %v", id, err) } return nil } // Exists checks to see if the id is already mounted. 
func (d *Driver) Exists(id string) bool { _, err := os.Stat(d.dir(id)) return err == nil } // isParent determines whether the given parent is the direct parent of the // given layer id func (d *Driver) isParent(id, parent string) bool { lowers, err := d.getLowerDirs(id) if err != nil { return false } if parent == "" && len(lowers) > 0 { return false } parentDir := d.dir(parent) var ld string if len(lowers) > 0 { ld = filepath.Dir(lowers[0]) } if ld == "" && parent == "" { return true } return ld == parentDir } // ApplyDiff applies the new layer into a root func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { if !d.isParent(id, parent) { return d.naiveDiff.ApplyDiff(id, parent, diff) } applyDir := d.getDiffPath(id) logger.Debugf("Applying tar in %s", applyDir) // Overlay doesn't need the parent id to apply the diff if err := untar(diff, applyDir, &archive.TarOptions{ UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, InUserNS: sys.RunningInUserNS(), }); err != nil { return 0, err } return directory.Size(context.TODO(), applyDir) } func (d *Driver) getDiffPath(id string) string { dir := d.dir(id) return path.Join(dir, diffDirName) } // DiffSize calculates the changes between the specified id // and its parent and returns the size in bytes of the changes // relative to its base filesystem directory. func (d *Driver) DiffSize(id, parent string) (size int64, err error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.DiffSize(id, parent) } return directory.Size(context.TODO(), d.getDiffPath(id)) } // Diff produces an archive of the changes between the specified // layer and its parent layer which may be "". 
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { if useNaiveDiff(d.home) || !d.isParent(id, parent) { return d.naiveDiff.Diff(id, parent) } diffPath := d.getDiffPath(id) logger.Debugf("Tar with options on %s", diffPath) return archive.TarWithOptions(diffPath, &archive.TarOptions{ Compression: archive.Uncompressed, UIDMaps: d.uidMaps, GIDMaps: d.gidMaps, WhiteoutFormat: archive.OverlayWhiteoutFormat, }) } // Changes produces a list of changes between the specified layer and its // parent layer. If parent is "", then all changes will be ADD changes. func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { return d.naiveDiff.Changes(id, parent) }
AkihiroSuda
e0c87f90cd93fbbfe1ba0e0abc80ba757e3b0a82
a84d824c5f23b002669b10cbe7c191508a4b6b21
Oh this was a github UI issue, its not in the init like I thought.
cpuguy83
4,939
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
@djs55 could you tag a release for vpnkit?
thaJeztah
4,940
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Sure, let me take a look.
djs55
4,941
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Is this suitable: https://github.com/moby/vpnkit/releases/tag/v0.5.0
djs55
4,942
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Thanks!!
thaJeztah
4,943
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
micro-nit; perhaps it's slightly clearer to add an empty line before this one to make it better stand out as "the vpnkit stage"
thaJeztah
4,944
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
❤️ nice
thaJeztah
4,945
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Could you push `:0.5.0` to Docker Hub as well
AkihiroSuda
4,946
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Pushed `vpnkit:0.5.0` to Docker Hub (sorry, I forgot to do this yesterday!)
djs55
4,947
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
thanks!
AkihiroSuda
4,948
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Why are we putting multiple binaries into a stage, including binary for wrong arch? Just the one for the target platform should suffice.
tonistiigi
4,949
moby/moby
42,067
Include VPNkit binary for arm64
Previously, VPNkit binary was installed only for amd64.
null
2021-02-24 04:38:39+00:00
2021-03-12 08:02:29+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.4.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /vpnkit /usr/local/bin/vpnkit.x86_64 WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
AkihiroSuda
4bbc52c04b83c4ab9c47f2404720fa0c6a4cff40
4648e8bc6fdcbeee867270578afd1fd8f06a03ab
Ideally we could write this like `COPY --from=vpnkit-${TARGETPLATFORM} /vpnkit /build/vpnkit`, but we can't, because `djs55/vpnkit:0.5.0` only contains `amd64` and `arm64` manifests.
AkihiroSuda
4,950
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
This seems expensive to call for `ping`, which is easily called multiple times by the official client for a single action.
cpuguy83
4,951
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
I think `ControlAvailable` means it is the current leader, not if it is a manager (outside of being the leader).
cpuguy83
4,952
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Yes, I was thinking about that as well, should probably have left a comment about that 🤔 I can look around if there's possibly a more lightweight approach to get the needed info (unless you have suggestions)
thaJeztah
4,953
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
A dedicated function to get just what we need seems ideal. Specifically for stuff like this that can change and as such requires locking I would probably set a value much like we do for the container state counters for `docker info`.
cpuguy83
4,954
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Hm.. you may be right; https://github.com/moby/moby/blob/46cdcd206c56172b95ba5c77b827a722dab426c5/daemon/cluster/noderunner.go#L388-L396
thaJeztah
4,955
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Oh! That's slightly different, but I think `controlClient` would match that. Let me dig a bit further
thaJeztah
4,956
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
api/server/router/system/system_routes.go
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
package system // import "github.com/docker/docker/api/server/router/system" import ( "context" "encoding/json" "fmt" "net/http" "time" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/server/router/build" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" timetypes "github.com/docker/docker/api/types/time" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/pkg/ioutils" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.WriteHeader(http.StatusOK) return nil } func (s *systemRouter) pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate") w.Header().Add("Pragma", "no-cache") builderVersion := build.BuilderVersion(*s.features) if bv := builderVersion; bv != "" { w.Header().Set("Builder-Version", string(bv)) } w.Header().Set("Swarm", s.swarmStatus()) if r.Method == http.MethodHead { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("Content-Length", "0") return nil } _, err := w.Write([]byte{'O', 'K'}) return err } func (s *systemRouter) swarmStatus() string { if s.cluster != nil { if p, ok := s.cluster.(StatusProvider); ok { return p.Status() } } return string(swarm.LocalNodeStateInactive) } func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemInfo() if s.cluster != nil { info.Swarm = s.cluster.Info() info.Warnings = append(info.Warnings, info.Swarm.Warnings...) 
} version := httputils.VersionFromContext(ctx) if versions.LessThan(version, "1.25") { // TODO: handle this conversion in engine-api type oldInfo struct { *types.Info ExecutionDriver string } old := &oldInfo{ Info: info, ExecutionDriver: "<not supported>", } nameOnlySecurityOptions := []string{} kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) if err != nil { return err } for _, s := range kvSecOpts { nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) } old.SecurityOptions = nameOnlySecurityOptions return httputils.WriteJSON(w, http.StatusOK, old) } if versions.LessThan(version, "1.39") { if info.KernelVersion == "" { info.KernelVersion = "<unknown>" } if info.OperatingSystem == "" { info.OperatingSystem = "<unknown>" } } if versions.GreaterThanOrEqualTo(version, "1.42") { info.KernelMemory = false } return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { info := s.backend.SystemVersion() return httputils.WriteJSON(w, http.StatusOK, info) } func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { getContainers, getImages, getVolumes, getBuildCache = true, true, true, true } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { case types.ContainerObject: getContainers = true case types.ImageObject: getImages = true case types.VolumeObject: getVolumes = true case types.BuildCacheObject: getBuildCache = true default: return invalidRequestError{Err: fmt.Errorf("unknown object type: %s", typ)} } } } eg, ctx := errgroup.WithContext(ctx) var systemDiskUsage *types.DiskUsage if 
getContainers || getImages || getVolumes { eg.Go(func() error { var err error systemDiskUsage, err = s.backend.SystemDiskUsage(ctx, DiskUsageOptions{ Containers: getContainers, Images: getImages, Volumes: getVolumes, }) return err }) } var buildCache []*types.BuildCache if getBuildCache { eg.Go(func() error { var err error buildCache, err = s.builder.DiskUsage(ctx) if err != nil { return errors.Wrap(err, "error getting build cache usage") } if buildCache == nil { // Ensure empty `BuildCache` field is represented as empty JSON array(`[]`) // instead of `null` to be consistent with `Images`, `Containers` etc. buildCache = []*types.BuildCache{} } return nil }) } if err := eg.Wait(); err != nil { return err } var builderSize int64 if versions.LessThan(version, "1.42") { for _, b := range buildCache { builderSize += b.Size } } du := types.DiskUsage{ BuildCache: buildCache, BuilderSize: builderSize, } if systemDiskUsage != nil { du.LayersSize = systemDiskUsage.LayersSize du.Images = systemDiskUsage.Images du.Containers = systemDiskUsage.Containers du.Volumes = systemDiskUsage.Volumes } return httputils.WriteJSON(w, http.StatusOK, du) } type invalidRequestError struct { Err error } func (e invalidRequestError) Error() string { return e.Err.Error() } func (e invalidRequestError) InvalidParameter() {} func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } since, err := eventTime(r.Form.Get("since")) if err != nil { return err } until, err := eventTime(r.Form.Get("until")) if err != nil { return err } var ( timeout <-chan time.Time onlyPastEvents bool ) if !until.IsZero() { if until.Before(since) { return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} } now := time.Now() onlyPastEvents = until.Before(now) if !onlyPastEvents { dur := until.Sub(now) timer := 
time.NewTimer(dur) defer timer.Stop() timeout = timer.C } } ef, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } w.Header().Set("Content-Type", "application/json") output := ioutils.NewWriteFlusher(w) defer output.Close() output.Flush() enc := json.NewEncoder(output) buffered, l := s.backend.SubscribeToEvents(since, until, ef) defer s.backend.UnsubscribeFromEvents(l) for _, ev := range buffered { if err := enc.Encode(ev); err != nil { return err } } if onlyPastEvents { return nil } for { select { case ev := <-l: jev, ok := ev.(events.Message) if !ok { logrus.Warnf("unexpected event message: %q", ev) continue } if err := enc.Encode(jev); err != nil { return err } case <-timeout: return nil case <-ctx.Done(): logrus.Debug("Client context cancelled, stop sending events") return nil } } } func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { var config *types.AuthConfig err := json.NewDecoder(r.Body).Decode(&config) r.Body.Close() if err != nil { return err } status, token, err := s.backend.AuthenticateToRegistry(ctx, config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ Status: status, IdentityToken: token, }) } func eventTime(formTime string) (time.Time, error) { t, tNano, err := timetypes.ParseTimestamps(formTime, -1) if err != nil { return time.Time{}, err } if t == -1 { return time.Time{}, nil } return time.Unix(t, tNano), nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Right, so this is the `cluster.Info()` (which is used to propagate the `docker info` data); https://github.com/moby/moby/blob/46cdcd206c56172b95ba5c77b827a722dab426c5/daemon/cluster/swarm.go#L447-L454 And looks like that's the field that's used by the CLI to print if the node is a Manager; https://github.com/docker/cli/blob/88c6089300a82d3373892adf6845a4fed1a4ba8d/cli/command/system/info.go#L311-L319 ```go func printSwarmInfo(dockerCli command.Cli, info types.Info) { if info.Swarm.LocalNodeState == swarm.LocalNodeStateInactive || info.Swarm.LocalNodeState == swarm.LocalNodeStateLocked { return } fmt.Fprintln(dockerCli.Out(), " NodeID:", info.Swarm.NodeID) if info.Swarm.Error != "" { fmt.Fprintln(dockerCli.Out(), " Error:", info.Swarm.Error) } fmt.Fprintln(dockerCli.Out(), " Is Manager:", info.Swarm.ControlAvailable) ```
thaJeztah
4,957
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
client/ping.go
package client // import "github.com/docker/docker/client" import ( "context" "net/http" "path" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", // "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use // a HEAD request on the endpoint, but falls back to GET if HEAD is not supported // by the daemon. func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err := cli.doRequest(ctx, req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } } else if IsErrConnectionFailed(err) { return ping, err } req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err = cli.doRequest(ctx, req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err } return parsePingResponse(cli, serverResp) } func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { var ping types.Ping if resp.header == nil { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } ping.APIVersion = resp.header.Get("API-Version") ping.OSType = resp.header.Get("OSType") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } err := cli.checkResponseErr(resp) return 
ping, errdefs.FromStatusCode(err, resp.statusCode) }
package client // import "github.com/docker/docker/client" import ( "context" "net/http" "path" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", // "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use // a HEAD request on the endpoint, but falls back to GET if HEAD is not supported // by the daemon. func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err := cli.doRequest(ctx, req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } } else if IsErrConnectionFailed(err) { return ping, err } req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err = cli.doRequest(ctx, req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err } return parsePingResponse(cli, serverResp) } func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { var ping types.Ping if resp.header == nil { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } ping.APIVersion = resp.header.Get("API-Version") ping.OSType = resp.header.Get("OSType") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = 
types.BuilderVersion(bv) } if si := resp.header.Get("Swarm"); si != "" { parts := strings.SplitN(si, "/", 2) ping.SwarmStatus = &swarm.Status{ NodeState: swarm.LocalNodeState(parts[0]), ControlAvailable: len(parts) == 2 && parts[1] == "manager", } } err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
This seems a little bit fiddly -- we're essentially parsing what's effectively a human-readable string back into separate parts? :grimacing:
tianon
4,958
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
client/ping.go
package client // import "github.com/docker/docker/client" import ( "context" "net/http" "path" "github.com/docker/docker/api/types" "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", // "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use // a HEAD request on the endpoint, but falls back to GET if HEAD is not supported // by the daemon. func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err := cli.doRequest(ctx, req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } } else if IsErrConnectionFailed(err) { return ping, err } req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err = cli.doRequest(ctx, req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err } return parsePingResponse(cli, serverResp) } func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { var ping types.Ping if resp.header == nil { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } ping.APIVersion = resp.header.Get("API-Version") ping.OSType = resp.header.Get("OSType") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } err := cli.checkResponseErr(resp) return 
ping, errdefs.FromStatusCode(err, resp.statusCode) }
package client // import "github.com/docker/docker/client" import ( "context" "net/http" "path" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/errdefs" ) // Ping pings the server and returns the value of the "Docker-Experimental", // "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use // a HEAD request on the endpoint, but falls back to GET if HEAD is not supported // by the daemon. func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { var ping types.Ping // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() // because ping requests are used during API version negotiation, so we want // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err := cli.doRequest(ctx, req) if err == nil { defer ensureReaderClosed(serverResp) switch serverResp.statusCode { case http.StatusOK, http.StatusInternalServerError: // Server handled the request, so parse the response return parsePingResponse(cli, serverResp) } } else if IsErrConnectionFailed(err) { return ping, err } req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) if err != nil { return ping, err } serverResp, err = cli.doRequest(ctx, req) defer ensureReaderClosed(serverResp) if err != nil { return ping, err } return parsePingResponse(cli, serverResp) } func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { var ping types.Ping if resp.header == nil { err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } ping.APIVersion = resp.header.Get("API-Version") ping.OSType = resp.header.Get("OSType") if resp.header.Get("Docker-Experimental") == "true" { ping.Experimental = true } if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = 
types.BuilderVersion(bv) } if si := resp.header.Get("Swarm"); si != "" { parts := strings.SplitN(si, "/", 2) ping.SwarmStatus = &swarm.Status{ NodeState: swarm.LocalNodeState(parts[0]), ControlAvailable: len(parts) == 2 && parts[1] == "manager", } } err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Yes, agreed. I've been going back-and-forth on what the best approach would be; we could make the format itself more "parseable" (`<status>/<role>`) or two separate headers (I wasn't sure about the separate headers, because we won't be able to set the `role` until the status is also `available` (IIRC).
thaJeztah
4,959
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
daemon/cluster/swarm.go
package cluster // import "github.com/docker/docker/daemon/cluster" import ( "context" "fmt" "net" "strings" "time" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/convert" "github.com/docker/docker/errdefs" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/stack" swarmapi "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/manager/encryption" swarmnode "github.com/docker/swarmkit/node" "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/grpc" ) // Init initializes new cluster from user provided request. func (c *Cluster) Init(req types.InitRequest) (string, error) { c.controlMutex.Lock() defer c.controlMutex.Unlock() if c.nr != nil { if req.ForceNewCluster { // Take c.mu temporarily to wait for presently running // API handlers to finish before shutting down the node. c.mu.Lock() if !c.nr.nodeState.IsManager() { c.mu.Unlock() return "", errSwarmNotManager } c.mu.Unlock() if err := c.nr.Stop(); err != nil { return "", err } } else { return "", errSwarmExists } } if err := validateAndSanitizeInitRequest(&req); err != nil { return "", errdefs.InvalidParameter(err) } listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) if err != nil { return "", err } advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) if err != nil { return "", err } dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) if err != nil { return "", err } localAddr := listenHost // If the local address is undetermined, the advertise address // will be used as local address, if it belongs to this system. // If the advertise address is not local, then we try to find // a system address to use as local address. If this fails, // we give up and ask the user to pass the listen address. 
if net.ParseIP(localAddr).IsUnspecified() { advertiseIP := net.ParseIP(advertiseHost) found := false for _, systemIP := range listSystemIPs() { if systemIP.Equal(advertiseIP) { localAddr = advertiseIP.String() found = true break } } if !found { ip, err := c.resolveSystemAddr() if err != nil { logrus.Warnf("Could not find a local address: %v", err) return "", errMustSpecifyListenAddr } localAddr = ip.String() } } if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil { return "", err } port, err := getDataPathPort(req.DataPathPort) if err != nil { return "", err } nr, err := c.newNodeRunner(nodeStartConfig{ forceNewCluster: req.ForceNewCluster, autolock: req.AutoLockManagers, LocalAddr: localAddr, ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), DataPathAddr: dataPathAddr, DefaultAddressPool: req.DefaultAddrPool, SubnetSize: req.SubnetSize, availability: req.Availability, DataPathPort: port, }) if err != nil { return "", err } c.mu.Lock() c.nr = nr c.mu.Unlock() if err := <-nr.Ready(); err != nil { c.mu.Lock() c.nr = nil c.mu.Unlock() if !req.ForceNewCluster { // if failure on first attempt don't keep state if err := clearPersistentState(c.root); err != nil { return "", err } } return "", err } state := nr.State() if state.swarmNode == nil { // should never happen but protect from panic return "", errors.New("invalid cluster state for spec initialization") } if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { return "", err } return state.NodeID(), nil } // Join makes current Cluster part of an existing swarm cluster. 
func (c *Cluster) Join(req types.JoinRequest) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.Lock() if c.nr != nil { c.mu.Unlock() return errors.WithStack(errSwarmExists) } c.mu.Unlock() if err := validateAndSanitizeJoinRequest(&req); err != nil { return errdefs.InvalidParameter(err) } listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) if err != nil { return err } var advertiseAddr string if req.AdvertiseAddr != "" { advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) // For joining, we don't need to provide an advertise address, // since the remote side can detect it. if err == nil { advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) } } dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) if err != nil { return err } nr, err := c.newNodeRunner(nodeStartConfig{ RemoteAddr: req.RemoteAddrs[0], ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: advertiseAddr, DataPathAddr: dataPathAddr, joinAddr: req.RemoteAddrs[0], joinToken: req.JoinToken, availability: req.Availability, }) if err != nil { return err } c.mu.Lock() c.nr = nr c.mu.Unlock() timeout := time.NewTimer(swarmConnectTimeout) defer timeout.Stop() select { case <-timeout.C: return errSwarmJoinTimeoutReached case err := <-nr.Ready(): if err != nil { c.mu.Lock() c.nr = nil c.mu.Unlock() if err := clearPersistentState(c.root); err != nil { return err } } return err } } // Inspect retrieves the configuration properties of a managed swarm cluster. 
func (c *Cluster) Inspect() (types.Swarm, error) { var swarm types.Swarm if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { s, err := c.inspect(ctx, state) if err != nil { return err } swarm = s return nil }); err != nil { return types.Swarm{}, err } return swarm, nil } func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) { s, err := getSwarm(ctx, state.controlClient) if err != nil { return types.Swarm{}, err } return convert.SwarmFromGRPC(*s), nil } // Update updates configuration of a managed swarm cluster. func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { swarm, err := getSwarm(ctx, state.controlClient) if err != nil { return err } // Validate spec name. if spec.Annotations.Name == "" { spec.Annotations.Name = "default" } else if spec.Annotations.Name != "default" { return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`)) } // In update, client should provide the complete spec of the swarm, including // Name and Labels. If a field is specified with 0 or nil, then the default value // will be used to swarmkit. clusterSpec, err := convert.SwarmSpecToGRPC(spec) if err != nil { return errdefs.InvalidParameter(err) } _, err = state.controlClient.UpdateCluster( ctx, &swarmapi.UpdateClusterRequest{ ClusterID: swarm.ID, Spec: &clusterSpec, ClusterVersion: &swarmapi.Version{ Index: version, }, Rotation: swarmapi.KeyRotation{ WorkerJoinToken: flags.RotateWorkerToken, ManagerJoinToken: flags.RotateManagerToken, ManagerUnlockKey: flags.RotateManagerUnlockKey, }, }, ) return err }) } // GetUnlockKey returns the unlock key for the swarm. 
func (c *Cluster) GetUnlockKey() (string, error) { var resp *swarmapi.GetUnlockKeyResponse if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { client := swarmapi.NewCAClient(state.grpcConn) r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) if err != nil { return err } resp = r return nil }); err != nil { return "", err } if len(resp.UnlockKey) == 0 { // no key return "", nil } return encryption.HumanReadableKey(resp.UnlockKey), nil } // UnlockSwarm provides a key to decrypt data that is encrypted at rest. func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.RLock() state := c.currentNodeState() if !state.IsActiveManager() { // when manager is not active, // unless it is locked, otherwise return error. if err := c.errNoManager(state); err != errSwarmLocked { c.mu.RUnlock() return err } } else { // when manager is active, return an error of "not locked" c.mu.RUnlock() return notLockedError{} } // only when swarm is locked, code running reaches here nr := c.nr c.mu.RUnlock() key, err := encryption.ParseHumanReadableKey(req.UnlockKey) if err != nil { return errdefs.InvalidParameter(err) } config := nr.config config.lockKey = key if err := nr.Stop(); err != nil { return err } nr, err = c.newNodeRunner(config) if err != nil { return err } c.mu.Lock() c.nr = nr c.mu.Unlock() if err := <-nr.Ready(); err != nil { if errors.Is(err, errSwarmLocked) { return invalidUnlockKey{} } return errors.Errorf("swarm component could not be started: %v", err) } return nil } // Leave shuts down Cluster and removes current state. 
func (c *Cluster) Leave(force bool) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.Lock() nr := c.nr if nr == nil { c.mu.Unlock() return errors.WithStack(errNoSwarm) } state := c.currentNodeState() c.mu.Unlock() if errors.Is(state.err, errSwarmLocked) && !force { // leave a locked swarm without --force is not allowed return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")) } if state.IsManager() && !force { msg := "You are attempting to leave the swarm on a node that is participating as a manager. " if state.IsActiveManager() { active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) if err == nil { if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { if isLastManager(reachable, unreachable) { msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " return errors.WithStack(notAvailableError(msg)) } msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) } } } else { msg += "Doing so may lose the consensus of your cluster. " } msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." return errors.WithStack(notAvailableError(msg)) } // release readers in here if err := nr.Stop(); err != nil { logrus.Errorf("failed to shut down cluster node: %v", err) stack.Dump() return err } c.mu.Lock() c.nr = nil c.mu.Unlock() if nodeID := state.NodeID(); nodeID != "" { nodeContainers, err := c.listContainerForNode(nodeID) if err != nil { return err } for _, id := range nodeContainers { if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { logrus.Errorf("error removing %v: %v", id, err) } } } // todo: cleanup optional? 
if err := clearPersistentState(c.root); err != nil { return err } c.config.Backend.DaemonLeavesCluster() return nil } // Info returns information about the current cluster state. func (c *Cluster) Info() types.Info { info := types.Info{ NodeAddr: c.GetAdvertiseAddress(), } c.mu.RLock() defer c.mu.RUnlock() state := c.currentNodeState() info.LocalNodeState = state.status if state.err != nil { info.Error = state.err.Error() } ctx, cancel := c.getRequestContext() defer cancel() if state.IsActiveManager() { info.ControlAvailable = true swarm, err := c.inspect(ctx, state) if err != nil { info.Error = err.Error() } info.Cluster = &swarm.ClusterInfo if r, err := state.controlClient.ListNodes( ctx, &swarmapi.ListNodesRequest{}, grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse), ); err != nil { info.Error = err.Error() } else { info.Nodes = len(r.Nodes) for _, n := range r.Nodes { if n.ManagerStatus != nil { info.Managers = info.Managers + 1 } } } switch info.LocalNodeState { case types.LocalNodeStateInactive, types.LocalNodeStateLocked, types.LocalNodeStateError: // nothing to do default: if info.Managers == 2 { const warn string = `WARNING: Running Swarm in a two-manager configuration. This configuration provides no fault tolerance, and poses a high risk to lose control over the cluster. 
Refer to https://docs.docker.com/engine/swarm/admin_guide/ to configure the Swarm for fault-tolerance.` info.Warnings = append(info.Warnings, warn) } } } if state.swarmNode != nil { for _, r := range state.swarmNode.Remotes() { info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) } info.NodeID = state.swarmNode.NodeID() } return info } func validateAndSanitizeInitRequest(req *types.InitRequest) error { var err error req.ListenAddr, err = validateAddr(req.ListenAddr) if err != nil { return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) } if req.Spec.Annotations.Name == "" { req.Spec.Annotations.Name = "default" } else if req.Spec.Annotations.Name != "default" { return errors.New(`swarm spec must be named "default"`) } return nil } func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { var err error req.ListenAddr, err = validateAddr(req.ListenAddr) if err != nil { return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) } if len(req.RemoteAddrs) == 0 { return errors.New("at least 1 RemoteAddr is required to join") } for i := range req.RemoteAddrs { req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) if err != nil { return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) } } return nil } func validateAddr(addr string) (string, error) { if addr == "" { return addr, errors.New("invalid empty address") } newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) if err != nil { return addr, nil } return strings.TrimPrefix(newaddr, "tcp://"), nil } func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for conn := range node.ListenControlSocket(ctx) { if ctx.Err() != nil { return ctx.Err() } if conn != nil { client := swarmapi.NewControlClient(conn) var cluster *swarmapi.Cluster for i := 0; ; i++ { lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) if err != 
nil { return fmt.Errorf("error on listing clusters: %v", err) } if len(lcr.Clusters) == 0 { if i < 10 { time.Sleep(200 * time.Millisecond) continue } return errors.New("empty list of clusters was returned") } cluster = lcr.Clusters[0] break } // In init, we take the initial default values from swarmkit, and merge // any non nil or 0 value from spec to GRPC spec. This will leave the // default value alone. // Note that this is different from Update(), as in Update() we expect // user to specify the complete spec of the cluster (as they already know // the existing one and knows which field to update) clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) if err != nil { return fmt.Errorf("error updating cluster settings: %v", err) } _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ ClusterID: cluster.ID, ClusterVersion: &cluster.Meta.Version, Spec: &clusterSpec, }) if err != nil { return fmt.Errorf("error updating cluster settings: %v", err) } return nil } } return ctx.Err() } func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { var ids []string filters := filters.NewArgs() filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ Filters: filters, }) if err != nil { return []string{}, err } for _, c := range containers { ids = append(ids, c.ID) } return ids, nil }
package cluster // import "github.com/docker/docker/daemon/cluster" import ( "context" "fmt" "net" "strings" "time" apitypes "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" types "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/daemon/cluster/convert" "github.com/docker/docker/errdefs" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/stack" swarmapi "github.com/docker/swarmkit/api" "github.com/docker/swarmkit/manager/encryption" swarmnode "github.com/docker/swarmkit/node" "github.com/pkg/errors" "github.com/sirupsen/logrus" "google.golang.org/grpc" ) // Init initializes new cluster from user provided request. func (c *Cluster) Init(req types.InitRequest) (string, error) { c.controlMutex.Lock() defer c.controlMutex.Unlock() if c.nr != nil { if req.ForceNewCluster { // Take c.mu temporarily to wait for presently running // API handlers to finish before shutting down the node. c.mu.Lock() if !c.nr.nodeState.IsManager() { c.mu.Unlock() return "", errSwarmNotManager } c.mu.Unlock() if err := c.nr.Stop(); err != nil { return "", err } } else { return "", errSwarmExists } } if err := validateAndSanitizeInitRequest(&req); err != nil { return "", errdefs.InvalidParameter(err) } listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) if err != nil { return "", err } advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) if err != nil { return "", err } dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) if err != nil { return "", err } localAddr := listenHost // If the local address is undetermined, the advertise address // will be used as local address, if it belongs to this system. // If the advertise address is not local, then we try to find // a system address to use as local address. If this fails, // we give up and ask the user to pass the listen address. 
if net.ParseIP(localAddr).IsUnspecified() { advertiseIP := net.ParseIP(advertiseHost) found := false for _, systemIP := range listSystemIPs() { if systemIP.Equal(advertiseIP) { localAddr = advertiseIP.String() found = true break } } if !found { ip, err := c.resolveSystemAddr() if err != nil { logrus.Warnf("Could not find a local address: %v", err) return "", errMustSpecifyListenAddr } localAddr = ip.String() } } if err := validateDefaultAddrPool(req.DefaultAddrPool, req.SubnetSize); err != nil { return "", err } port, err := getDataPathPort(req.DataPathPort) if err != nil { return "", err } nr, err := c.newNodeRunner(nodeStartConfig{ forceNewCluster: req.ForceNewCluster, autolock: req.AutoLockManagers, LocalAddr: localAddr, ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), DataPathAddr: dataPathAddr, DefaultAddressPool: req.DefaultAddrPool, SubnetSize: req.SubnetSize, availability: req.Availability, DataPathPort: port, }) if err != nil { return "", err } c.mu.Lock() c.nr = nr c.mu.Unlock() if err := <-nr.Ready(); err != nil { c.mu.Lock() c.nr = nil c.mu.Unlock() if !req.ForceNewCluster { // if failure on first attempt don't keep state if err := clearPersistentState(c.root); err != nil { return "", err } } return "", err } state := nr.State() if state.swarmNode == nil { // should never happen but protect from panic return "", errors.New("invalid cluster state for spec initialization") } if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { return "", err } return state.NodeID(), nil } // Join makes current Cluster part of an existing swarm cluster. 
func (c *Cluster) Join(req types.JoinRequest) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.Lock() if c.nr != nil { c.mu.Unlock() return errors.WithStack(errSwarmExists) } c.mu.Unlock() if err := validateAndSanitizeJoinRequest(&req); err != nil { return errdefs.InvalidParameter(err) } listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) if err != nil { return err } var advertiseAddr string if req.AdvertiseAddr != "" { advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) // For joining, we don't need to provide an advertise address, // since the remote side can detect it. if err == nil { advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) } } dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) if err != nil { return err } nr, err := c.newNodeRunner(nodeStartConfig{ RemoteAddr: req.RemoteAddrs[0], ListenAddr: net.JoinHostPort(listenHost, listenPort), AdvertiseAddr: advertiseAddr, DataPathAddr: dataPathAddr, joinAddr: req.RemoteAddrs[0], joinToken: req.JoinToken, availability: req.Availability, }) if err != nil { return err } c.mu.Lock() c.nr = nr c.mu.Unlock() timeout := time.NewTimer(swarmConnectTimeout) defer timeout.Stop() select { case <-timeout.C: return errSwarmJoinTimeoutReached case err := <-nr.Ready(): if err != nil { c.mu.Lock() c.nr = nil c.mu.Unlock() if err := clearPersistentState(c.root); err != nil { return err } } return err } } // Inspect retrieves the configuration properties of a managed swarm cluster. 
func (c *Cluster) Inspect() (types.Swarm, error) { var swarm types.Swarm if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { s, err := c.inspect(ctx, state) if err != nil { return err } swarm = s return nil }); err != nil { return types.Swarm{}, err } return swarm, nil } func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) { s, err := getSwarm(ctx, state.controlClient) if err != nil { return types.Swarm{}, err } return convert.SwarmFromGRPC(*s), nil } // Update updates configuration of a managed swarm cluster. func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { swarm, err := getSwarm(ctx, state.controlClient) if err != nil { return err } // Validate spec name. if spec.Annotations.Name == "" { spec.Annotations.Name = "default" } else if spec.Annotations.Name != "default" { return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`)) } // In update, client should provide the complete spec of the swarm, including // Name and Labels. If a field is specified with 0 or nil, then the default value // will be used to swarmkit. clusterSpec, err := convert.SwarmSpecToGRPC(spec) if err != nil { return errdefs.InvalidParameter(err) } _, err = state.controlClient.UpdateCluster( ctx, &swarmapi.UpdateClusterRequest{ ClusterID: swarm.ID, Spec: &clusterSpec, ClusterVersion: &swarmapi.Version{ Index: version, }, Rotation: swarmapi.KeyRotation{ WorkerJoinToken: flags.RotateWorkerToken, ManagerJoinToken: flags.RotateManagerToken, ManagerUnlockKey: flags.RotateManagerUnlockKey, }, }, ) return err }) } // GetUnlockKey returns the unlock key for the swarm. 
func (c *Cluster) GetUnlockKey() (string, error) { var resp *swarmapi.GetUnlockKeyResponse if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { client := swarmapi.NewCAClient(state.grpcConn) r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) if err != nil { return err } resp = r return nil }); err != nil { return "", err } if len(resp.UnlockKey) == 0 { // no key return "", nil } return encryption.HumanReadableKey(resp.UnlockKey), nil } // UnlockSwarm provides a key to decrypt data that is encrypted at rest. func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.RLock() state := c.currentNodeState() if !state.IsActiveManager() { // when manager is not active, // unless it is locked, otherwise return error. if err := c.errNoManager(state); err != errSwarmLocked { c.mu.RUnlock() return err } } else { // when manager is active, return an error of "not locked" c.mu.RUnlock() return notLockedError{} } // only when swarm is locked, code running reaches here nr := c.nr c.mu.RUnlock() key, err := encryption.ParseHumanReadableKey(req.UnlockKey) if err != nil { return errdefs.InvalidParameter(err) } config := nr.config config.lockKey = key if err := nr.Stop(); err != nil { return err } nr, err = c.newNodeRunner(config) if err != nil { return err } c.mu.Lock() c.nr = nr c.mu.Unlock() if err := <-nr.Ready(); err != nil { if errors.Is(err, errSwarmLocked) { return invalidUnlockKey{} } return errors.Errorf("swarm component could not be started: %v", err) } return nil } // Leave shuts down Cluster and removes current state. 
func (c *Cluster) Leave(force bool) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() c.mu.Lock() nr := c.nr if nr == nil { c.mu.Unlock() return errors.WithStack(errNoSwarm) } state := c.currentNodeState() c.mu.Unlock() if errors.Is(state.err, errSwarmLocked) && !force { // leave a locked swarm without --force is not allowed return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")) } if state.IsManager() && !force { msg := "You are attempting to leave the swarm on a node that is participating as a manager. " if state.IsActiveManager() { active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) if err == nil { if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { if isLastManager(reachable, unreachable) { msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " return errors.WithStack(notAvailableError(msg)) } msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) } } } else { msg += "Doing so may lose the consensus of your cluster. " } msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." return errors.WithStack(notAvailableError(msg)) } // release readers in here if err := nr.Stop(); err != nil { logrus.Errorf("failed to shut down cluster node: %v", err) stack.Dump() return err } c.mu.Lock() c.nr = nil c.mu.Unlock() if nodeID := state.NodeID(); nodeID != "" { nodeContainers, err := c.listContainerForNode(nodeID) if err != nil { return err } for _, id := range nodeContainers { if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { logrus.Errorf("error removing %v: %v", id, err) } } } // todo: cleanup optional? 
if err := clearPersistentState(c.root); err != nil { return err } c.config.Backend.DaemonLeavesCluster() return nil } // Info returns information about the current cluster state. func (c *Cluster) Info() types.Info { info := types.Info{ NodeAddr: c.GetAdvertiseAddress(), } c.mu.RLock() defer c.mu.RUnlock() state := c.currentNodeState() info.LocalNodeState = state.status if state.err != nil { info.Error = state.err.Error() } ctx, cancel := c.getRequestContext() defer cancel() if state.IsActiveManager() { info.ControlAvailable = true swarm, err := c.inspect(ctx, state) if err != nil { info.Error = err.Error() } info.Cluster = &swarm.ClusterInfo if r, err := state.controlClient.ListNodes( ctx, &swarmapi.ListNodesRequest{}, grpc.MaxCallRecvMsgSize(defaultRecvSizeForListResponse), ); err != nil { info.Error = err.Error() } else { info.Nodes = len(r.Nodes) for _, n := range r.Nodes { if n.ManagerStatus != nil { info.Managers = info.Managers + 1 } } } switch info.LocalNodeState { case types.LocalNodeStateInactive, types.LocalNodeStateLocked, types.LocalNodeStateError: // nothing to do default: if info.Managers == 2 { const warn string = `WARNING: Running Swarm in a two-manager configuration. This configuration provides no fault tolerance, and poses a high risk to lose control over the cluster. 
Refer to https://docs.docker.com/engine/swarm/admin_guide/ to configure the Swarm for fault-tolerance.` info.Warnings = append(info.Warnings, warn) } } } if state.swarmNode != nil { for _, r := range state.swarmNode.Remotes() { info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) } info.NodeID = state.swarmNode.NodeID() } return info } // Status returns a textual representation of the node's swarm status and role (manager/worker) func (c *Cluster) Status() string { c.mu.RLock() s := c.currentNodeState() c.mu.RUnlock() state := string(s.status) if s.status == types.LocalNodeStateActive { if s.IsActiveManager() || s.IsManager() { state += "/manager" } else { state += "/worker" } } return state } func validateAndSanitizeInitRequest(req *types.InitRequest) error { var err error req.ListenAddr, err = validateAddr(req.ListenAddr) if err != nil { return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) } if req.Spec.Annotations.Name == "" { req.Spec.Annotations.Name = "default" } else if req.Spec.Annotations.Name != "default" { return errors.New(`swarm spec must be named "default"`) } return nil } func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { var err error req.ListenAddr, err = validateAddr(req.ListenAddr) if err != nil { return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) } if len(req.RemoteAddrs) == 0 { return errors.New("at least 1 RemoteAddr is required to join") } for i := range req.RemoteAddrs { req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) if err != nil { return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) } } return nil } func validateAddr(addr string) (string, error) { if addr == "" { return addr, errors.New("invalid empty address") } newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) if err != nil { return addr, nil } return strings.TrimPrefix(newaddr, "tcp://"), nil } func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { 
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for conn := range node.ListenControlSocket(ctx) { if ctx.Err() != nil { return ctx.Err() } if conn != nil { client := swarmapi.NewControlClient(conn) var cluster *swarmapi.Cluster for i := 0; ; i++ { lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) if err != nil { return fmt.Errorf("error on listing clusters: %v", err) } if len(lcr.Clusters) == 0 { if i < 10 { time.Sleep(200 * time.Millisecond) continue } return errors.New("empty list of clusters was returned") } cluster = lcr.Clusters[0] break } // In init, we take the initial default values from swarmkit, and merge // any non nil or 0 value from spec to GRPC spec. This will leave the // default value alone. // Note that this is different from Update(), as in Update() we expect // user to specify the complete spec of the cluster (as they already know // the existing one and knows which field to update) clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) if err != nil { return fmt.Errorf("error updating cluster settings: %v", err) } _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ ClusterID: cluster.ID, ClusterVersion: &cluster.Meta.Version, Spec: &clusterSpec, }) if err != nil { return fmt.Errorf("error updating cluster settings: %v", err) } return nil } } return ctx.Err() } func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { var ids []string filters := filters.NewArgs() filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ Filters: filters, }) if err != nil { return []string{}, err } for _, c := range containers { ids = append(ids, c.ID) } return ids, nil }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
Slightly thinking if we should keep manager/worker separate from node state (as other status/role combinations would be possible)
thaJeztah
4,960
moby/moby
42,064
API: add "Swarm" header to _ping endpoint
### depends on https://github.com/moby/moby/pull/42063 This adds an additional "Swarm" header to the _ping endpoint response, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. This change is not versioned in the API, and will be returned irregardless of the API version that is used. Clients should fall back to using other endpoints to get this information if the header is not present. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ```markdown The `GET /_ping` and `HEAD /_ping` API endpoints now return a `Swarm` header, which allows a client to detect if Swarm is enabled on the daemon, without having to call additional endpoints. ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-23 17:39:52+00:00
2022-03-26 13:39:50+00:00
integration/system/ping_test.go
package system // import "github.com/docker/docker/integration/system" import ( "net/http" "strings" "testing" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" "gotest.tools/v3/skip" ) func TestPingCacheHeaders(t *testing.T) { skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "skip test from new feature") defer setupTest(t)() res, _, err := request.Get("/_ping") assert.NilError(t, err) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Equal(t, hdr(res, "Cache-Control"), "no-cache, no-store, must-revalidate") assert.Equal(t, hdr(res, "Pragma"), "no-cache") } func TestPingGet(t *testing.T) { defer setupTest(t)() res, body, err := request.Get("/_ping") assert.NilError(t, err) b, err := request.ReadBody(body) assert.NilError(t, err) assert.Equal(t, string(b), "OK") assert.Equal(t, res.StatusCode, http.StatusOK) assert.Check(t, hdr(res, "API-Version") != "") } func TestPingHead(t *testing.T) { skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "skip test from new feature") defer setupTest(t)() res, body, err := request.Head("/_ping") assert.NilError(t, err) b, err := request.ReadBody(body) assert.NilError(t, err) assert.Equal(t, 0, len(b)) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Check(t, hdr(res, "API-Version") != "") } func hdr(res *http.Response, name string) string { val, ok := res.Header[http.CanonicalHeaderKey(name)] if !ok || len(val) == 0 { return "" } return strings.Join(val, ", ") }
package system // import "github.com/docker/docker/integration/system" import ( "context" "net/http" "strings" "testing" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/testutil/daemon" "github.com/docker/docker/testutil/request" "gotest.tools/v3/assert" "gotest.tools/v3/skip" ) func TestPingCacheHeaders(t *testing.T) { skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "skip test from new feature") defer setupTest(t)() res, _, err := request.Get("/_ping") assert.NilError(t, err) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Equal(t, hdr(res, "Cache-Control"), "no-cache, no-store, must-revalidate") assert.Equal(t, hdr(res, "Pragma"), "no-cache") } func TestPingGet(t *testing.T) { defer setupTest(t)() res, body, err := request.Get("/_ping") assert.NilError(t, err) b, err := request.ReadBody(body) assert.NilError(t, err) assert.Equal(t, string(b), "OK") assert.Equal(t, res.StatusCode, http.StatusOK) assert.Check(t, hdr(res, "API-Version") != "") } func TestPingHead(t *testing.T) { skip.If(t, versions.LessThan(testEnv.DaemonAPIVersion(), "1.40"), "skip test from new feature") defer setupTest(t)() res, body, err := request.Head("/_ping") assert.NilError(t, err) b, err := request.ReadBody(body) assert.NilError(t, err) assert.Equal(t, 0, len(b)) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Check(t, hdr(res, "API-Version") != "") } func TestPingSwarmHeader(t *testing.T) { skip.If(t, testEnv.IsRemoteDaemon) skip.If(t, testEnv.DaemonInfo.OSType == "windows") defer setupTest(t)() d := daemon.New(t) d.Start(t) defer d.Stop(t) client := d.NewClientT(t) defer client.Close() ctx := context.TODO() t.Run("before swarm init", func(t *testing.T) { res, _, err := request.Get("/_ping") assert.NilError(t, err) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Equal(t, hdr(res, "Swarm"), "inactive") }) _, err := client.SwarmInit(ctx, swarm.InitRequest{ListenAddr: "127.0.0.1", 
AdvertiseAddr: "127.0.0.1:2377"}) assert.NilError(t, err) t.Run("after swarm init", func(t *testing.T) { res, _, err := request.Get("/_ping", request.Host(d.Sock())) assert.NilError(t, err) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Equal(t, hdr(res, "Swarm"), "active/manager") }) err = client.SwarmLeave(ctx, true) assert.NilError(t, err) t.Run("after swarm leave", func(t *testing.T) { res, _, err := request.Get("/_ping", request.Host(d.Sock())) assert.NilError(t, err) assert.Equal(t, res.StatusCode, http.StatusOK) assert.Equal(t, hdr(res, "Swarm"), "inactive") }) } func hdr(res *http.Response, name string) string { val, ok := res.Header[http.CanonicalHeaderKey(name)] if !ok || len(val) == 0 { return "" } return strings.Join(val, ", ") }
thaJeztah
070726194d3e29f19697ffa954234df38ffa7ec4
aaf70b5c6b1dcce784cfb70bc7be28c552a88da7
This should've been: ```suggestion res, _, err := request.Get("/_ping", request.Host(d.Sock())) ``` Fixing in https://github.com/moby/moby/pull/43658
thaJeztah
4,961
moby/moby
42,054
Use buster backports to build with libseccomp-2.4.4
Fixes #41704 The latest released versions of the static binaries (20.10.3) are still unable to use faccessat2 with musl-1.2.2 even though this was addressed in #41353 and related issues. The underlying cause seems to be that the build system here still uses the default version of libseccomp shipped with buster. An updated version is available in buster backports: https://packages.debian.org/buster-backports/libseccomp-dev Signed-off-by: Jeremy Huntwork <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Built dockerd, containerd and runc locally and then compared the results with previous ones. 
**- How I did it** My original running system used the 20.10.3 published static binaries: ```sh $ docker version Client: Docker Engine - Community Version: 20.10.3 API version: 1.41 Go version: go1.13.15 Git commit: 48d30b5 Built: Fri Jan 29 14:28:23 2021 OS/Arch: linux/amd64 Context: default Experimental: true Server: Docker Engine - Community Engine: Version: 20.10.3 API version: 1.41 (minimum version 1.12) Go version: go1.13.15 Git commit: 46229ca Built: Fri Jan 29 14:31:57 2021 OS/Arch: linux/amd64 Experimental: false containerd: Version: v1.4.3 GitCommit: 269548fa27e0089a8b8278fc4fc781d7f65a939b runc: Version: 1.0.0-rc92 GitCommit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff docker-init: Version: 0.19.0 GitCommit: de40ad0 $ runc --version runc version 1.0.0-rc92 commit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff spec: 1.0.2-dev ``` Prepare a test file that will use the faccessat2 syscall: ```c #include <fcntl.h> #include <unistd.h> #include <stdio.h> int main(void) { int r; r = faccessat(AT_FDCWD, "/etc/resolv.conf", O_RDONLY, AT_EACCESS); printf("Returned %d\n", r); return r; } ``` Run a container using musl-1.2.2, build it and execute it: ```sh $ docker run -it -v "$(pwd)":/src mere/dev # pacman -Q musl musl 1.2.2-1 # cc test.c -o test -static # file test test: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped # ./test Returned -1 ``` Running with strace would also give something like this: ``` faccessat2(AT_FDCWD, "/etc/resolv.conf", F_OK, AT_EACCESS) = -1 EPERM (Operation not permitted) ``` **- How to verify it** Build locally with this change, I simply ran 'make' and 'make install' Afterwards, the docker and runc version reported this: ```sh $ docker version Client: Version: 20.10.0-dev API version: 1.41 Go version: go1.13.15 Git commit: 70a00157f1 Built: Mon Feb 22 04:15:30 2021 OS/Arch: linux/amd64 Context: default Experimental: true Server: Engine: Version: dev API version: 1.41 (minimum version 1.12) Go version: go1.13.15 
Git commit: bc6f4cc703 Built: Mon Feb 22 03:36:24 2021 OS/Arch: linux/amd64 Experimental: false containerd: Version: v1.4.3 GitCommit: 269548fa27e0089a8b8278fc4fc781d7f65a939b runc: Version: 1.0.0-rc93 GitCommit: 12644e614e25b05da6fd08a38ffa0cfe1903fdec docker-init: Version: 0.19.0 GitCommit: de40ad0 $ runc --version runc version 1.0.0-rc93 commit: 12644e614e25b05da6fd08a38ffa0cfe1903fdec spec: 1.0.2-dev go: go1.13.15 libseccomp: 2.4.4 ``` And testing the statically compiled C file running in the same container now works: ```sh $ docker run -it -v "$(pwd)":/src mere/dev # ./test Returned 0 ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Build static binaries with libseccomp-2.4.4 **- A picture of a cute animal (not mandatory but encouraged)** ![image](https://user-images.githubusercontent.com/239626/108665434-e62a7c00-74a2-11eb-821a-e31058f85891.png)
null
2021-02-22 05:16:11+00:00
2021-03-14 20:54:43+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > 
/etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64/buster-backports \ libseccomp-dev:armel/buster-backports \ libseccomp-dev:armhf/buster-backports FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ 
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG 
TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
jhuntwork
4f366540575c6ffcc8168b642136de522852e4d1
4735a0c84f89d6be53d92cf2ccbd086d682edc42
Minor nit; I think we have a space after `>` in most places; perhaps add one here as well for consistency ```suggestion RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list ```
thaJeztah
4,962
moby/moby
42,054
Use buster backports to build with libseccomp-2.4.4
Fixes #41704 The latest released versions of the static binaries (20.10.3) are still unable to use faccessat2 with musl-1.2.2 even though this was addressed in #41353 and related issues. The underlying cause seems to be that the build system here still uses the default version of libseccomp shipped with buster. An updated version is available in buster backports: https://packages.debian.org/buster-backports/libseccomp-dev Signed-off-by: Jeremy Huntwork <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Built dockerd, containerd and runc locally and then compared the results with previous ones. 
**- How I did it** My original running system used the 20.10.3 published static binaries: ```sh $ docker version Client: Docker Engine - Community Version: 20.10.3 API version: 1.41 Go version: go1.13.15 Git commit: 48d30b5 Built: Fri Jan 29 14:28:23 2021 OS/Arch: linux/amd64 Context: default Experimental: true Server: Docker Engine - Community Engine: Version: 20.10.3 API version: 1.41 (minimum version 1.12) Go version: go1.13.15 Git commit: 46229ca Built: Fri Jan 29 14:31:57 2021 OS/Arch: linux/amd64 Experimental: false containerd: Version: v1.4.3 GitCommit: 269548fa27e0089a8b8278fc4fc781d7f65a939b runc: Version: 1.0.0-rc92 GitCommit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff docker-init: Version: 0.19.0 GitCommit: de40ad0 $ runc --version runc version 1.0.0-rc92 commit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff spec: 1.0.2-dev ``` Prepare a test file that will use the faccessat2 syscall: ```c #include <fcntl.h> #include <unistd.h> #include <stdio.h> int main(void) { int r; r = faccessat(AT_FDCWD, "/etc/resolv.conf", O_RDONLY, AT_EACCESS); printf("Returned %d\n", r); return r; } ``` Run a container using musl-1.2.2, build it and execute it: ```sh $ docker run -it -v "$(pwd)":/src mere/dev # pacman -Q musl musl 1.2.2-1 # cc test.c -o test -static # file test test: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped # ./test Returned -1 ``` Running with strace would also give something like this: ``` faccessat2(AT_FDCWD, "/etc/resolv.conf", F_OK, AT_EACCESS) = -1 EPERM (Operation not permitted) ``` **- How to verify it** Build locally with this change, I simply ran 'make' and 'make install' Afterwards, the docker and runc version reported this: ```sh $ docker version Client: Version: 20.10.0-dev API version: 1.41 Go version: go1.13.15 Git commit: 70a00157f1 Built: Mon Feb 22 04:15:30 2021 OS/Arch: linux/amd64 Context: default Experimental: true Server: Engine: Version: dev API version: 1.41 (minimum version 1.12) Go version: go1.13.15 
Git commit: bc6f4cc703 Built: Mon Feb 22 03:36:24 2021 OS/Arch: linux/amd64 Experimental: false containerd: Version: v1.4.3 GitCommit: 269548fa27e0089a8b8278fc4fc781d7f65a939b runc: Version: 1.0.0-rc93 GitCommit: 12644e614e25b05da6fd08a38ffa0cfe1903fdec docker-init: Version: 0.19.0 GitCommit: de40ad0 $ runc --version runc version 1.0.0-rc93 commit: 12644e614e25b05da6fd08a38ffa0cfe1903fdec spec: 1.0.2-dev go: go1.13.15 libseccomp: 2.4.4 ``` And testing the statically compiled C file running in the same container now works: ```sh $ docker run -it -v "$(pwd)":/src mere/dev # ./test Returned 0 ``` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Build static binaries with libseccomp-2.4.4 **- A picture of a cute animal (not mandatory but encouraged)** ![image](https://user-images.githubusercontent.com/239626/108665434-e62a7c00-74a2-11eb-821a-e31058f85891.png)
null
2021-02-22 05:16:11+00:00
2021-03-14 20:54:43+00:00
Dockerfile
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN 
--mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64 \ libseccomp-dev:armel \ libseccomp-dev:armhf FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ 
--mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ 
cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
# syntax=docker/dockerfile:1.2 ARG CROSS="false" ARG SYSTEMD="false" # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored ARG GO_VERSION=1.13.15 ARG DEBIAN_FRONTEND=noninteractive ARG VPNKIT_VERSION=0.5.0 ARG DOCKER_BUILDTAGS="apparmor seccomp" ARG BASE_DEBIAN_DISTRO="buster" ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}" FROM ${GOLANG_IMAGE} AS base RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache ARG APT_MIRROR RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \ && sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list ENV GO111MODULE=off FROM base AS criu ARG DEBIAN_FRONTEND # Install dependency packages specific to criu RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libcap-dev \ libnet-dev \ libnl-3-dev \ libprotobuf-c-dev \ libprotobuf-dev \ protobuf-c-compiler \ protobuf-compiler \ python-protobuf # Install CRIU for checkpoint/restore support ARG CRIU_VERSION=3.14 RUN mkdir -p /usr/src/criu \ && curl -sSL https://github.com/checkpoint-restore/criu/archive/v${CRIU_VERSION}.tar.gz | tar -C /usr/src/criu/ -xz --strip-components=1 \ && cd /usr/src/criu \ && make \ && make PREFIX=/build/ install-criu FROM base AS registry WORKDIR /go/src/github.com/docker/distribution # Install two versions of the registry. The first one is a recent version that # supports both schema 1 and 2 manifests. The second one is an older version that # only supports schema1 manifests. This allows integration-cli tests to cover # push/pull with both schema1 and schema2 manifests. # The old version of the registry is not working on arm64, so installation is # skipped on that architecture. 
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/docker/distribution.git . \ && git checkout -q "$REGISTRY_COMMIT" \ && GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \ && case $(dpkg --print-architecture) in \ amd64|armhf|ppc64*|s390x) \ git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \ GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \ go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \ ;; \ esac FROM base AS swagger WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger # Install go-swagger for validating swagger.yaml # This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix # TODO: move to under moby/ or fix upstream go-swagger to work for us. ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050 RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=tmpfs,target=/go/src/ \ set -x \ && git clone https://github.com/kolyshkin/go-swagger.git . 
\ && git checkout -q "$GO_SWAGGER_COMMIT" \ && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ ca-certificates \ curl \ jq # Get useful and necessary Hub images so we can "docker load" locally instead of pulling COPY contrib/download-frozen-image-v2.sh / ARG TARGETARCH RUN /download-frozen-image-v2.sh /build \ buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \ busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \ busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \ debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \ hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \ arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1 # See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list) FROM base AS cross-false FROM --platform=linux/amd64 base AS cross-true ARG DEBIAN_FRONTEND RUN dpkg --add-architecture arm64 RUN dpkg --add-architecture armel RUN dpkg --add-architecture armhf RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ crossbuild-essential-arm64 \ crossbuild-essential-armel \ crossbuild-essential-armhf FROM cross-${CROSS} as dev-base FROM dev-base AS runtime-dev-cross-false ARG DEBIAN_FRONTEND RUN echo 'deb http://deb.debian.org/debian buster-backports main' > 
/etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ binutils-mingw-w64 \ g++-mingw-w64-x86-64 \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev/buster-backports \ libsystemd-dev \ libudev-dev FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true ARG DEBIAN_FRONTEND # These crossbuild packages rely on gcc-<arch>, but this doesn't want to install # on non-amd64 systems. # Additionally, the crossbuild-amd64 is currently only on debian:buster, so # other architectures cannnot crossbuild amd64. RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libapparmor-dev:arm64 \ libapparmor-dev:armel \ libapparmor-dev:armhf \ libseccomp-dev:arm64/buster-backports \ libseccomp-dev:armel/buster-backports \ libseccomp-dev:armhf/buster-backports FROM runtime-dev-cross-${CROSS} AS runtime-dev FROM base AS tomlv ARG TOMLV_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tomlv FROM base AS vndr ARG VNDR_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh vndr FROM dev-base AS containerd ARG DEBIAN_FRONTEND RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \ 
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ libbtrfs-dev ARG CONTAINERD_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh containerd FROM dev-base AS proxy ARG LIBNETWORK_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh proxy FROM base AS golangci_lint ARG GOLANGCI_LINT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh golangci_lint FROM base AS gotestsum ARG GOTESTSUM_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh gotestsum FROM base AS shfmt ARG SHFMT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh shfmt FROM dev-base AS dockercli ARG DOCKERCLI_CHANNEL ARG DOCKERCLI_VERSION RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh dockercli FROM runtime-dev AS runc ARG RUNC_COMMIT ARG RUNC_BUILDTAGS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh runc FROM dev-base AS tini ARG DEBIAN_FRONTEND ARG 
TINI_COMMIT RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ cmake \ vim-common RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh tini FROM dev-base AS rootlesskit ARG ROOTLESSKIT_COMMIT RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ --mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \ PREFIX=/build /tmp/install/install.sh rootlesskit COPY ./contrib/dockerd-rootless.sh /build COPY ./contrib/dockerd-rootless-setuptool.sh /build FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64 FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64 FROM scratch AS vpnkit COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64 COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64 # TODO: Some of this is only really needed for testing, it would be nice to split this up FROM runtime-dev AS dev-systemd-false ARG DEBIAN_FRONTEND RUN groupadd -r docker RUN useradd --create-home --gid docker unprivilegeduser \ && mkdir -p /home/unprivilegeduser/.local/share/docker \ && chown -R unprivilegeduser /home/unprivilegeduser # Let us use a .bashrc file RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc # Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker RUN ldconfig # This should only install packages that are specifically needed for the dev environment and nothing else # Do you really need to add another package here? Can it be done in a different build stage? 
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ apparmor \ aufs-tools \ bash-completion \ bzip2 \ iptables \ jq \ libcap2-bin \ libnet1 \ libnl-3-200 \ libprotobuf-c1 \ net-tools \ pigz \ python3-pip \ python3-setuptools \ python3-wheel \ sudo \ thin-provisioning-tools \ uidmap \ vim \ vim-common \ xfsprogs \ xz-utils \ zip # Switch to use iptables instead of nftables (to match the CI hosts) # TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824) RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \ && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \ && update-alternatives --set arptables /usr/sbin/arptables-legacy || true RUN pip3 install yamllint==1.16.0 COPY --from=dockercli /build/ /usr/local/cli COPY --from=frozen-images /build/ /docker-frozen-images COPY --from=swagger /build/ /usr/local/bin/ COPY --from=tomlv /build/ /usr/local/bin/ COPY --from=tini /build/ /usr/local/bin/ COPY --from=registry /build/ /usr/local/bin/ COPY --from=criu /build/ /usr/local/ COPY --from=vndr /build/ /usr/local/bin/ COPY --from=gotestsum /build/ /usr/local/bin/ COPY --from=golangci_lint /build/ /usr/local/bin/ COPY --from=shfmt /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ ENV PATH=/usr/local/cli:$PATH ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" WORKDIR /go/src/github.com/docker/docker VOLUME /var/lib/docker VOLUME /home/unprivilegeduser/.local/share/docker # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] FROM 
dev-systemd-false AS dev-systemd-true RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \ --mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \ apt-get update && apt-get install -y --no-install-recommends \ dbus \ dbus-user-session \ systemd \ systemd-sysv RUN mkdir -p hack \ && curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \ && chmod +x hack/dind-systemd ENTRYPOINT ["hack/dind-systemd"] FROM dev-systemd-${SYSTEMD} AS dev FROM runtime-dev AS binary-base ARG DOCKER_GITCOMMIT=HEAD ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT} ARG VERSION ENV VERSION=${VERSION} ARG PLATFORM ENV PLATFORM=${PLATFORM} ARG PRODUCT ENV PRODUCT=${PRODUCT} ARG DEFAULT_PRODUCT_LICENSE ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE} ARG DOCKER_BUILDTAGS ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}" ENV PREFIX=/build # TODO: This is here because hack/make.sh binary copies these extras binaries # from $PATH into the bundles dir. # It would be nice to handle this in a different way. 
COPY --from=tini /build/ /usr/local/bin/ COPY --from=runc /build/ /usr/local/bin/ COPY --from=containerd /build/ /usr/local/bin/ COPY --from=rootlesskit /build/ /usr/local/bin/ COPY --from=proxy /build/ /usr/local/bin/ COPY --from=vpnkit /build/ /usr/local/bin/ WORKDIR /go/src/github.com/docker/docker FROM binary-base AS build-binary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh binary FROM binary-base AS build-dynbinary RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ hack/make.sh dynbinary FROM binary-base AS build-cross ARG DOCKER_CROSSPLATFORMS RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=bind,target=/go/src/github.com/docker/docker \ --mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \ hack/make.sh cross FROM scratch AS binary COPY --from=build-binary /build/bundles/ / FROM scratch AS dynbinary COPY --from=build-dynbinary /build/bundles/ / FROM scratch AS cross COPY --from=build-cross /build/bundles/ / FROM dev AS final COPY . /go/src/github.com/docker/docker
jhuntwork
4f366540575c6ffcc8168b642136de522852e4d1
4735a0c84f89d6be53d92cf2ccbd086d682edc42
Same here ```suggestion RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list ```
thaJeztah
4,963
moby/moby
42,050
CI: update tests to be more resilient against CLI output format and for libnetwork changes
relates to / in preparation of https://github.com/moby/moby/pull/41908 ("vendor: docker/libnetwork b3507428be5b458cb0e2b4086b13531fb0706e46") ### integration/container: wrap some long lines for readability Just some minor code-style changes for readability ### integration: update getExternalAddress to prefer IPv4 Rootlesskit doesn't currently handle IPv6 addresses, causing TestNetworkLoopbackNat and TestNetworkNat to fail; Error starting userland proxy: error while calling PortManager.AddPort(): listen tcp: address :::8080: too many colons in address This patch: - Updates `getExternalAddress()` to pick IPv4 address if both IPv6 and IPv4 are found - Update TestNetworkNat to net.JoinHostPort(), so that square brackets are used for IPv6 addresses (e.g. `[::]:8080`) ### integration-cli: rely less on "docker port" output format Also re-formatting some lines for readability.
null
2021-02-19 10:45:05+00:00
2021-03-24 20:19:26+00:00
integration-cli/docker_cli_port_test.go
package main import ( "fmt" "net" "regexp" "sort" "strconv" "strings" "testing" "gotest.tools/v3/assert" ) func (s *DockerSuite) TestPortList(c *testing.T) { testRequires(c, DaemonIsLinux) // one port out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", firstID) err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", firstID) // three port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) // more and one port mapped to the same container port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9999:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "80/tcp -> 0.0.0.0:9999", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) testRange := func() { // host port ranges used IDs := make([]string, 3) for i := 0; i < 3; i++ { out, _ = dockerCmd(c, "run", "-d", "-p", "9090-9092:80", "busybox", "top") IDs[i] = 
strings.TrimSpace(out) out, _ = dockerCmd(c, "port", IDs[i]) err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) // Port list is not correct assert.NilError(c, err) } // test port range exhaustion out, _, err = dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top") // Exhausted port range did not return an error assert.Assert(c, err != nil, "out: %s", out) for i := 0; i < 3; i++ { dockerCmd(c, "rm", "-f", IDs[i]) } } testRange() // Verify we ran re-use port ranges after they are no longer in use. testRange() // test invalid port ranges for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { out, _, err = dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top") // Port range should have returned an error assert.Assert(c, err != nil, "out: %s", out) } // test host range:container range spec. out, _ = dockerCmd(c, "run", "-d", "-p", "9800-9803:80-83", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9800", "81/tcp -> 0.0.0.0:9801", "82/tcp -> 0.0.0.0:9802", "83/tcp -> 0.0.0.0:9803"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) // test mixing protocols in same port range out, _ = dockerCmd(c, "run", "-d", "-p", "8000-8080:80", "-p", "8000-8080:80/udp", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) // Running this test multiple times causes the TCP port to increment. 
err = assertPortRange(c, out, []int{8000, 8080}, []int{8000, 8080}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) } func assertPortList(c *testing.T, out string, expected []string) error { lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != len(expected) { return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) } sort.Strings(lines) sort.Strings(expected) for i := 0; i < len(expected); i++ { if lines[i] != expected[i] { return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") } } return nil } func assertPortRange(c *testing.T, out string, expectedTCP, expectedUDP []int) error { lines := strings.Split(strings.Trim(out, "\n "), "\n") var validTCP, validUDP bool for _, l := range lines { // 80/tcp -> 0.0.0.0:8015 port, err := strconv.Atoi(strings.Split(l, ":")[1]) if err != nil { return err } if strings.Contains(l, "tcp") && expectedTCP != nil { if port < expectedTCP[0] || port > expectedTCP[1] { return fmt.Errorf("tcp port (%d) not in range expected range %d-%d", port, expectedTCP[0], expectedTCP[1]) } validTCP = true } if strings.Contains(l, "udp") && expectedUDP != nil { if port < expectedUDP[0] || port > expectedUDP[1] { return fmt.Errorf("udp port (%d) not in range expected range %d-%d", port, expectedUDP[0], expectedUDP[1]) } validUDP = true } } if !validTCP { return fmt.Errorf("tcp port not found") } if !validUDP { return fmt.Errorf("udp port not found") } return nil } func stopRemoveContainer(id string, c *testing.T) { dockerCmd(c, "rm", "-f", id) } func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *testing.T) { testRequires(c, DaemonIsLinux) // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports port1 := 80 port2 := 443 expose1 := fmt.Sprintf("--expose=%d", port1) expose2 := fmt.Sprintf("--expose=%d", port2) dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created 
container reports the unpublished ports unpPort1 := fmt.Sprintf("%d/tcp", port1) unpPort2 := fmt.Sprintf("%d/tcp", port2) out, _ := dockerCmd(c, "ps", "-n=1") // Missing unpublished ports in docker ps output assert.Assert(c, strings.Contains(out, unpPort1)) // Missing unpublished ports in docker ps output assert.Assert(c, strings.Contains(out, unpPort2)) // Run the container forcing to publish the exposed ports dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the exposed ports in the port bindings expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output assert.Equal(c, expBndRegx1.MatchString(out), true, fmt.Sprintf("out: %s; unpPort1: %s", out, unpPort1)) // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output assert.Equal(c, expBndRegx2.MatchString(out), true, fmt.Sprintf("out: %s; unpPort2: %s", out, unpPort2)) // Run the container specifying explicit port bindings for the exposed ports offset := 10000 pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") id := strings.TrimSpace(out) // Check docker ps o/p for last created container reports the specified port mappings expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output assert.Assert(c, strings.Contains(out, expBnd1)) // Cannot find expected port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) // Remove container now otherwise it 
will interfere with next test stopRemoveContainer(id, c) // Run the container with explicit port bindings and no exposed ports out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") id = strings.TrimSpace(out) // Check docker ps o/p for last created container reports the specified port mappings out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output assert.Assert(c, strings.Contains(out, expBnd1)) // Cannot find expected port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) // Remove container now otherwise it will interfere with next test stopRemoveContainer(id, c) // Run the container with one unpublished exposed port and one explicit port binding dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the specified unpublished port and port mapping out, _ = dockerCmd(c, "ps", "-n=1") // Missing unpublished exposed ports (unpPort1) in docker ps output assert.Assert(c, strings.Contains(out, unpPort1)) // Missing port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) } func (s *DockerSuite) TestPortHostBinding(c *testing.T) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "nc", "-l", "-p", "80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", "9876") dockerCmd(c, "rm", "-f", firstID) out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") // Port is still bound after the Container is removed assert.Assert(c, err != nil, "out: %s", out) } func (s *DockerSuite) TestPortExposeHostBinding(c *testing.T) { testRequires(c, DaemonIsLinux, 
NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", "nc", "-l", "-p", "80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") _, exposedPort, err := net.SplitHostPort(out) assert.Assert(c, err == nil, "out: %s", out) dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", strings.TrimSpace(exposedPort)) dockerCmd(c, "rm", "-f", firstID) out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", strings.TrimSpace(exposedPort)) // Port is still bound after the Container is removed assert.Assert(c, err != nil, "out: %s", out) } func (s *DockerSuite) TestPortBindingOnSandbox(c *testing.T) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") nr := getNetworkResource(c, "internal-net") assert.Equal(c, nr.Internal, true) dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") assert.Assert(c, waitRun("c1") == nil) _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") assert.Assert(c, err != nil, "Port mapping on internal network is expected to fail") // Connect container to another normal bridge network dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") dockerCmd(c, "network", "connect", "foo-net", "c1") _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") assert.Assert(c, err == nil, "Port mapping on the new network is expected to succeed") }
package main import ( "context" "fmt" "regexp" "sort" "strconv" "strings" "testing" "gotest.tools/v3/assert" ) func (s *DockerSuite) TestPortList(c *testing.T) { testRequires(c, DaemonIsLinux) // one port out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", firstID) err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", firstID) // three port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878", }) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) // more and one port mapped to the same container port out, _ = dockerCmd(c, "run", "-d", "-p", "9876:80", "-p", "9999:80", "-p", "9877:81", "-p", "9878:82", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID, "80") err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) // Port list is not correct assert.NilError(c, err) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9876", "80/tcp -> 0.0.0.0:9999", "81/tcp -> 0.0.0.0:9877", "82/tcp -> 0.0.0.0:9878", }) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) testRange := func() { // host port ranges used IDs := make([]string, 3) for i := 0; i < 3; i++ { out, _ = dockerCmd(c, "run", "-d", "-p", "9090-9092:80", "busybox", "top") IDs[i] = 
strings.TrimSpace(out) out, _ = dockerCmd(c, "port", IDs[i]) err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) // Port list is not correct assert.NilError(c, err) } // test port range exhaustion out, _, err = dockerCmdWithError("run", "-d", "-p", "9090-9092:80", "busybox", "top") // Exhausted port range did not return an error assert.Assert(c, err != nil, "out: %s", out) for i := 0; i < 3; i++ { dockerCmd(c, "rm", "-f", IDs[i]) } } testRange() // Verify we ran re-use port ranges after they are no longer in use. testRange() // test invalid port ranges for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { out, _, err = dockerCmdWithError("run", "-d", "-p", invalidRange, "busybox", "top") // Port range should have returned an error assert.Assert(c, err != nil, "out: %s", out) } // test host range:container range spec. out, _ = dockerCmd(c, "run", "-d", "-p", "9800-9803:80-83", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) err = assertPortList(c, out, []string{ "80/tcp -> 0.0.0.0:9800", "81/tcp -> 0.0.0.0:9801", "82/tcp -> 0.0.0.0:9802", "83/tcp -> 0.0.0.0:9803", }) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) // test mixing protocols in same port range out, _ = dockerCmd(c, "run", "-d", "-p", "8000-8080:80", "-p", "8000-8080:80/udp", "busybox", "top") ID = strings.TrimSpace(out) out, _ = dockerCmd(c, "port", ID) // Running this test multiple times causes the TCP port to increment. 
err = assertPortRange(ID, []int{8000, 8080}, []int{8000, 8080}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "rm", "-f", ID) } func assertPortList(c *testing.T, out string, expected []string) error { c.Helper() lines := strings.Split(strings.Trim(out, "\n "), "\n") if len(lines) != len(expected) { return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) } sort.Strings(lines) sort.Strings(expected) // "docker port" does not yet have a "--format" flag, and older versions // of the CLI used an incorrect output format for mappings on IPv6 addresses // for example, "80/tcp -> :::80" instead of "80/tcp -> [::]:80". oldFormat := func(mapping string) string { old := strings.Replace(mapping, "-> [", "-> ", 1) old = strings.Replace(old, "]:", ":", 1) return old } for i := 0; i < len(expected); i++ { if lines[i] == expected[i] { continue } if lines[i] != oldFormat(expected[i]) { return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") } } return nil } func assertPortRange(id string, expectedTCP, expectedUDP []int) error { client := testEnv.APIClient() inspect, err := client.ContainerInspect(context.TODO(), id) if err != nil { return err } var validTCP, validUDP bool for portAndProto, binding := range inspect.NetworkSettings.Ports { if portAndProto.Proto() == "tcp" && len(expectedTCP) == 0 { continue } if portAndProto.Proto() == "udp" && len(expectedTCP) == 0 { continue } for _, b := range binding { port, err := strconv.Atoi(b.HostPort) if err != nil { return err } if len(expectedTCP) > 0 { if port < expectedTCP[0] || port > expectedTCP[1] { return fmt.Errorf("tcp port (%d) not in range expected range %d-%d", port, expectedTCP[0], expectedTCP[1]) } validTCP = true } if len(expectedUDP) > 0 { if port < expectedUDP[0] || port > expectedUDP[1] { return fmt.Errorf("udp port (%d) not in range expected range %d-%d", port, expectedUDP[0], expectedUDP[1]) } validUDP = true } } } if !validTCP { return fmt.Errorf("tcp port not found") 
} if !validUDP { return fmt.Errorf("udp port not found") } return nil } func stopRemoveContainer(id string, c *testing.T) { dockerCmd(c, "rm", "-f", id) } func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *testing.T) { testRequires(c, DaemonIsLinux) // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports port1 := 80 port2 := 443 expose1 := fmt.Sprintf("--expose=%d", port1) expose2 := fmt.Sprintf("--expose=%d", port2) dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the unpublished ports unpPort1 := fmt.Sprintf("%d/tcp", port1) unpPort2 := fmt.Sprintf("%d/tcp", port2) out, _ := dockerCmd(c, "ps", "-n=1") // Missing unpublished ports in docker ps output assert.Assert(c, strings.Contains(out, unpPort1)) // Missing unpublished ports in docker ps output assert.Assert(c, strings.Contains(out, unpPort2)) // Run the container forcing to publish the exposed ports dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the exposed ports in the port bindings expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output assert.Equal(c, expBndRegx1.MatchString(out), true, fmt.Sprintf("out: %s; unpPort1: %s", out, unpPort1)) // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output assert.Equal(c, expBndRegx2.MatchString(out), true, fmt.Sprintf("out: %s; unpPort2: %s", out, unpPort2)) // Run the container specifying explicit port bindings for the exposed ports offset := 10000 pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, 
expose1, expose2, "busybox", "sleep", "5") id := strings.TrimSpace(out) // Check docker ps o/p for last created container reports the specified port mappings expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) expBnd2 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output assert.Assert(c, strings.Contains(out, expBnd1)) // Cannot find expected port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) // Remove container now otherwise it will interfere with next test stopRemoveContainer(id, c) // Run the container with explicit port bindings and no exposed ports out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") id = strings.TrimSpace(out) // Check docker ps o/p for last created container reports the specified port mappings out, _ = dockerCmd(c, "ps", "-n=1") // Cannot find expected port binding (expBnd1) in docker ps output assert.Assert(c, strings.Contains(out, expBnd1)) // Cannot find expected port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) // Remove container now otherwise it will interfere with next test stopRemoveContainer(id, c) // Run the container with one unpublished exposed port and one explicit port binding dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") // Check docker ps o/p for last created container reports the specified unpublished port and port mapping out, _ = dockerCmd(c, "ps", "-n=1") // Missing unpublished exposed ports (unpPort1) in docker ps output assert.Assert(c, strings.Contains(out, unpPort1)) // Missing port binding (expBnd2) in docker ps output assert.Assert(c, strings.Contains(out, expBnd2)) } func (s *DockerSuite) TestPortHostBinding(c *testing.T) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "nc", "-l", "-p", 
"80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "port", firstID, "80") err := assertPortList(c, out, []string{"0.0.0.0:9876"}) // Port list is not correct assert.NilError(c, err) dockerCmd(c, "run", "--net=host", "busybox", "nc", "localhost", "9876") dockerCmd(c, "rm", "-f", firstID) out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") // Port is still bound after the Container is removed assert.Assert(c, err != nil, "out: %s", out) } func (s *DockerSuite) TestPortExposeHostBinding(c *testing.T) { testRequires(c, DaemonIsLinux, NotUserNamespace) out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", "nc", "-l", "-p", "80") firstID := strings.TrimSpace(out) out, _ = dockerCmd(c, "inspect", "--format", `{{index .NetworkSettings.Ports "80/tcp" 0 "HostPort" }}`, firstID) exposedPort := strings.TrimSpace(out) dockerCmd(c, "run", "--net=host", "busybox", "nc", "127.0.0.1", exposedPort) dockerCmd(c, "rm", "-f", firstID) out, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "127.0.0.1", exposedPort) // Port is still bound after the Container is removed assert.Assert(c, err != nil, "out: %s", out) } func (s *DockerSuite) TestPortBindingOnSandbox(c *testing.T) { testRequires(c, DaemonIsLinux, NotUserNamespace) dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") nr := getNetworkResource(c, "internal-net") assert.Equal(c, nr.Internal, true) dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") assert.Assert(c, waitRun("c1") == nil) _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") assert.Assert(c, err != nil, "Port mapping on internal network is expected to fail") // Connect container to another normal bridge network dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") dockerCmd(c, "network", "connect", "foo-net", "c1") _, _, err = 
dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") assert.Assert(c, err == nil, "Port mapping on the new network is expected to succeed") }
thaJeztah
dea989ea2fb8015b21c22f728fe0cd0d78256f84
08e67904c9f7f7e70d4c3bb688d05301f5fdf7e4
Discussing with @tonistiigi - making this slightly more strict (only replace first `[` and `]` before port number
thaJeztah
4,964
moby/moby
42,034
dockerd-rootless.sh: prohibit running as root
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Prohibit running `dockerd-rootless.sh` as root **- How I did it** Added `if [ $(id -u) = 0 ]; then echo "(error)"; exit 1; fi` **- How to verify it** ```console $ sudo dockerd-rootless.sh ... Must not run as root + exit 1 ``` **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-17 06:00:15+00:00
2021-02-24 21:11:20+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/engine/security/rootless/ set -e -x if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! 
[ -w $HOME ]; then echo "HOME needs to be set and writable" exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if which $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if which slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if which vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/engine/security/rootless/ set -e -x if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! 
[ -w $HOME ]; then echo "HOME needs to be set and writable" exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if which $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if which slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if which vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock exec dockerd $@ fi
AkihiroSuda
271b43c43f31875aeba4bd62ad149cac5b12b8aa
e8227b1f0d5248a4bc7b393576bea424dda4c39e
Thought I left a comment, but I guess I forgot. Does this only apply to this part of the code, or to the script as a whole? If it's for the whole script, perhaps something like; ```suggestion echo "This script must be executed as a non-privileged user" ``` (and in that case, I think it would make sense to put it at the top of the script, together with the other checks at the start)
thaJeztah
4,965
moby/moby
42,034
dockerd-rootless.sh: prohibit running as root
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Prohibit running `dockerd-rootless.sh` as root **- How I did it** Added `if [ $(id -u) = 0 ]; then echo "(error)"; exit 1; fi` **- How to verify it** ```console $ sudo dockerd-rootless.sh ... Must not run as root + exit 1 ``` **- A picture of a cute animal (not mandatory but encouraged)** :penguin:
null
2021-02-17 06:00:15+00:00
2021-02-24 21:11:20+00:00
contrib/dockerd-rootless.sh
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/engine/security/rootless/ set -e -x if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! 
[ -w $HOME ]; then echo "HOME needs to be set and writable" exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if which $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if which slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if which vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock exec dockerd $@ fi
#!/bin/sh # dockerd-rootless.sh executes dockerd in rootless mode. # # Usage: dockerd-rootless.sh [DOCKERD_OPTIONS] # # External dependencies: # * newuidmap and newgidmap needs to be installed. # * /etc/subuid and /etc/subgid needs to be configured for the current user. # * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed. # # Recognized environment variables: # * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit". # * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers. # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto". # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto". # # See the documentation for the further information: https://docs.docker.com/engine/security/rootless/ set -e -x if ! [ -w $XDG_RUNTIME_DIR ]; then echo "XDG_RUNTIME_DIR needs to be set and writable" exit 1 fi if ! 
[ -w $HOME ]; then echo "HOME needs to be set and writable" exit 1 fi rootlesskit="" for f in docker-rootlesskit rootlesskit; do if which $f > /dev/null 2>&1; then rootlesskit=$f break fi done if [ -z $rootlesskit ]; then echo "rootlesskit needs to be installed" exit 1 fi : "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}" : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}" net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU if [ -z $net ]; then if which slirp4netns > /dev/null 2>&1; then # If --netns-type is present in --help, slirp4netns is >= v0.4.0. if slirp4netns --help | grep -qw -- --netns-type; then net=slirp4netns if [ -z $mtu ]; then mtu=65520 fi else echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit." fi fi if [ -z $net ]; then if which vpnkit > /dev/null 2>&1; then net=vpnkit else echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed" exit 1 fi fi fi if [ -z $mtu ]; then mtu=1500 fi if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then _DOCKERD_ROOTLESS_CHILD=1 export _DOCKERD_ROOTLESS_CHILD if [ "$(id -u)" = "0" ]; then echo "This script must be executed as a non-privileged user" exit 1 fi # Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces. 
# # --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks # * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the # namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host # (by either systemd-networkd or NetworkManager) # * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace exec $rootlesskit \ --net=$net --mtu=$mtu \ --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \ --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \ --disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \ --copy-up=/etc --copy-up=/run \ --propagation=rslave \ $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \ $0 $@ else [ $_DOCKERD_ROOTLESS_CHILD = 1 ] # remove the symlinks for the existing files in the parent namespace if any, # so that we can create our own files in our mount namespace. rm -f /run/docker /run/containerd /run/xtables.lock exec dockerd $@ fi
AkihiroSuda
271b43c43f31875aeba4bd62ad149cac5b12b8aa
e8227b1f0d5248a4bc7b393576bea424dda4c39e
Updated the error message. > put it at the top of the script, together with the other checks at the start No. The script "reexecs" itself inside a user namespace, so the UID flips to be zero.
AkihiroSuda
4,966
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information wether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was ran - once when `docker run ...` was ran This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/service.go
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } type volumeEventLogger interface { LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger volumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger volumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. // When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. 
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } if err == nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. 
// Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } // VolumeEventLogger interface provides methods to log volume-related events type VolumeEventLogger interface { // LogVolumeEvent generates an event related to a volume. LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger VolumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds, WithEventLogger(logger)) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. 
// When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. // Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. 
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
Not really happy with this; we could change `NewStore(root, ds)` to take an event logger (or take functional options (`WithEventLogger(..)`), or add a `SetEventLogger()`; both would change the Interface though, so I'm not sure what would be cleaner
thaJeztah
4,967
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information wether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was ran - once when `docker run ...` was ran This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/service.go
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } type volumeEventLogger interface { LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger volumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger volumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. // When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. 
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } if err == nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. 
// Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } // VolumeEventLogger interface provides methods to log volume-related events type VolumeEventLogger interface { // LogVolumeEvent generates an event related to a volume. LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger VolumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds, WithEventLogger(logger)) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. 
// When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. // Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. 
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
Not super happy with this either; problem here is that "prune" events are specific to the _service_, but now we're using the event logger that's attached to the "store". Perhaps it would be cleaner to keep a reference in _both_ `VolumesService` _and_ `VolumeStore` (open to input on that)
thaJeztah
4,968
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information wether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was ran - once when `docker run ...` was ran This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/service.go
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } type volumeEventLogger interface { LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger volumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger volumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. // When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. 
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } if err == nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. 
// Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } // VolumeEventLogger interface provides methods to log volume-related events type VolumeEventLogger interface { // LogVolumeEvent generates an event related to a volume. LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger VolumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds, WithEventLogger(logger)) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. 
// When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. // Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. 
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
See my other comment below; perhaps we should keep _both_ this _and_ the one in `VolumeStore`
thaJeztah
4,969
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information wether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was ran - once when `docker run ...` was ran This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/service.go
package service // import "github.com/docker/docker/volume/service"

import (
	"context"
	"strconv"
	"sync/atomic"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/directory"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/service/opts"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// ds abstracts the volume-driver store so it can be swapped in tests.
type ds interface {
	GetDriverList() []string
}

// volumeEventLogger emits volume lifecycle events (create, destroy, mount, prune).
type volumeEventLogger interface {
	LogVolumeEvent(volumeID, action string, attributes map[string]string)
}

// VolumesService manages access to volumes.
// This is used as the main access point for volumes to higher level services and the API.
type VolumesService struct {
	vs           *VolumeStore
	ds           ds
	pruneRunning int32 // guarded with atomic CAS; non-zero while a prune is in flight
	eventLogger  volumeEventLogger
}

// NewVolumeService creates a new volume service rooted at root, registering
// the default (local) driver and wiring up the given event logger.
func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger volumeEventLogger) (*VolumesService, error) {
	driverStore := drivers.NewStore(pg)
	if err := setupDefaultDriver(driverStore, root, rootIDs); err != nil {
		return nil, err
	}

	store, err := NewStore(root, driverStore)
	if err != nil {
		return nil, err
	}
	return &VolumesService{vs: store, ds: driverStore, eventLogger: logger}, nil
}

// GetDriverList gets the list of registered volume drivers.
func (s *VolumesService) GetDriverList() []string {
	return s.ds.GetDriverList()
}

// Create creates a volume.
// If the caller is creating this volume to be consumed immediately, it is
// expected that the caller specifies a reference ID.
// This reference ID will protect this volume from removal.
//
// A good example for a reference ID is a container's ID.
// When whatever is going to reference this volume is removed the caller should dereference the volume by calling `Release`.
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } if err == nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. 
// Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
package service // import "github.com/docker/docker/volume/service"

import (
	"context"
	"strconv"
	"sync/atomic"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/directory"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/plugingetter"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
	"github.com/docker/docker/volume/service/opts"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// ds abstracts the volume-driver store so it can be swapped in tests.
type ds interface {
	GetDriverList() []string
}

// VolumeEventLogger interface provides methods to log volume-related events
type VolumeEventLogger interface {
	// LogVolumeEvent generates an event related to a volume.
	LogVolumeEvent(volumeID, action string, attributes map[string]string)
}

// VolumesService manages access to volumes.
// This is used as the main access point for volumes to higher level services and the API.
type VolumesService struct {
	vs           *VolumeStore
	ds           ds
	pruneRunning int32 // guarded with atomic CAS; non-zero while a prune is in flight
	eventLogger  VolumeEventLogger
}

// NewVolumeService creates a new volume service rooted at root, registering
// the default (local) driver and wiring the event logger into the store so
// events are only emitted for state changes that actually happen.
func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) {
	driverStore := drivers.NewStore(pg)
	if err := setupDefaultDriver(driverStore, root, rootIDs); err != nil {
		return nil, err
	}

	store, err := NewStore(root, driverStore, WithEventLogger(logger))
	if err != nil {
		return nil, err
	}
	return &VolumesService{vs: store, ds: driverStore, eventLogger: logger}, nil
}

// GetDriverList gets the list of registered volume drivers.
func (s *VolumesService) GetDriverList() []string {
	return s.ds.GetDriverList()
}

// Create creates a volume.
// If the caller is creating this volume to be consumed immediately, it is
// expected that the caller specifies a reference ID.
// This reference ID will protect this volume from removal.
//
// A good example for a reference ID is a container's ID.
// When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. // Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. 
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
I guess it makes more sense if they both keep the `eventLogger` since they are using it. You know right away that the service sends events if you see the `eventLogger` in the struct.
rumpl
4,970
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information wether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was ran - once when `docker run ...` was ran This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/service.go
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } type volumeEventLogger interface { LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger volumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger volumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. // When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. 
func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } if err == nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. 
// Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
package service // import "github.com/docker/docker/volume/service" import ( "context" "strconv" "sync/atomic" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/plugingetter" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) type ds interface { GetDriverList() []string } // VolumeEventLogger interface provides methods to log volume-related events type VolumeEventLogger interface { // LogVolumeEvent generates an event related to a volume. LogVolumeEvent(volumeID, action string, attributes map[string]string) } // VolumesService manages access to volumes // This is used as the main access point for volumes to higher level services and the API. type VolumesService struct { vs *VolumeStore ds ds pruneRunning int32 eventLogger VolumeEventLogger } // NewVolumeService creates a new volume service func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.Identity, logger VolumeEventLogger) (*VolumesService, error) { ds := drivers.NewStore(pg) if err := setupDefaultDriver(ds, root, rootIDs); err != nil { return nil, err } vs, err := NewStore(root, ds, WithEventLogger(logger)) if err != nil { return nil, err } return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil } // GetDriverList gets the list of registered volume drivers func (s *VolumesService) GetDriverList() []string { return s.ds.GetDriverList() } // Create creates a volume // If the caller is creating this volume to be consumed immediately, it is // expected that the caller specifies a reference ID. // This reference ID will protect this volume from removal. // // A good example for a reference ID is a container's ID. 
// When whatever is going to reference this volume is removed the caller should defeference the volume by calling `Release`. func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { if name == "" { name = stringid.GenerateRandomID() } v, err := s.vs.Create(ctx, name, driverName, opts...) if err != nil { return nil, err } apiV := volumeToAPIType(v) return &apiV, nil } // Get returns details about a volume func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { v, err := s.vs.Get(ctx, name, getOpts...) if err != nil { return nil, err } vol := volumeToAPIType(v) var cfg opts.GetConfig for _, o := range getOpts { o(&cfg) } if cfg.ResolveStatus { vol.Status = v.Status() } return &vol, nil } // Mount mounts the volume // Callers should specify a uniqe reference for each Mount/Unmount pair. // // Example: // ```go // mountID := "randomString" // s.Mount(ctx, vol, mountID) // s.Unmount(ctx, vol, mountID) // ``` func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return "", err } return v.Mount(ref) } // Unmount unmounts the volume. // Note that depending on the implementation, the volume may still be mounted due to other resources using it. // // The reference specified here should be the same reference specified during `Mount` and should be // unique for each mount/unmount pair. // See `Mount` documentation for an example. 
func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) if err != nil { if IsNotExist(err) { err = errdefs.NotFound(err) } return err } return v.Unmount(ref) } // Release releases a volume reference func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { return s.vs.Release(ctx, name, ref) } // Remove removes a volume // An error is returned if the volume is still referenced. func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } v, err := s.vs.Get(ctx, name) if err != nil { if IsNotExist(err) && cfg.PurgeOnError { return nil } return err } err = s.vs.Remove(ctx, v, rmOpts...) if IsNotExist(err) { err = nil } else if IsInUse(err) { err = errdefs.Conflict(err) } else if IsNotExist(err) && cfg.PurgeOnError { err = nil } return err } var acceptedPruneFilters = map[string]bool{ "label": true, "label!": true, } var acceptedListFilters = map[string]bool{ "dangling": true, "name": true, "driver": true, "label": true, } // LocalVolumesSize gets all local volumes and fetches their size on disk // Note that this intentionally skips volumes which have mount options. Typically // volumes with mount options are not really local even if they are using the // local driver. func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } return s.volumesToAPI(ctx, ls, calcSize(true)), nil } // Prune removes (local) volumes which match the past in filter arguments. // Note that this intentionally skips volumes with mount options as there would // be no space reclaimed in this case. 
func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { return nil, errdefs.Conflict(errors.New("a prune operation is already running")) } defer atomic.StoreInt32(&s.pruneRunning, 0) by, err := filtersToBy(filter, acceptedPruneFilters) if err != nil { return nil, err } ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { dv, ok := v.(volume.DetailedVolume) return ok && len(dv.Options()) == 0 }))) if err != nil { return nil, err } rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} for _, v := range ls { select { case <-ctx.Done(): err := ctx.Err() if err == context.Canceled { err = nil } return rep, err default: } vSize, err := directory.Size(ctx, v.Path()) if err != nil { logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") } if err := s.vs.Remove(ctx, v); err != nil { logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") continue } rep.SpaceReclaimed += uint64(vSize) rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) } s.eventLogger.LogVolumeEvent("", "prune", map[string]string{ "reclaimed": strconv.FormatInt(int64(rep.SpaceReclaimed), 10), }) return rep, nil } // List gets the list of volumes which match the past in filters // If filters is nil or empty all volumes are returned. 
func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { by, err := filtersToBy(filter, acceptedListFilters) if err != nil { return nil, nil, err } volumes, warnings, err := s.vs.Find(ctx, by) if err != nil { return nil, nil, err } return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil } // Shutdown shuts down the image service and dependencies func (s *VolumesService) Shutdown() error { return s.vs.Shutdown() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
Yup, looking at it again, I think that's cleaner; updating
thaJeztah
4,971
moby/moby
42,028
volumes: only send "create" event when actually creating volume
fixes https://github.com/moby/moby/issues/40047 The VolumesService did not have information whether or not a volume was _created_ or if a volume already existed in the driver, and the existing volume was used. As a result, multiple "create" events could be generated for the same volume. For example: 1. Run `docker events` in a shell to start listening for events 2. Create a volume: docker volume create myvolume 3. Start a container that uses that volume: docker run -dit -v myvolume:/foo busybox 4. Check the events that were generated: 2021-02-15T18:49:55.874621004+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.442759052+01:00 volume create myvolume (driver=local) 2021-02-15T18:50:11.487104176+01:00 container create 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 2021-02-15T18:50:11.519288102+01:00 network connect a19f6bb8d44ff84d478670fa4e34c5bf5305f42786294d3d90e790ac74b6d3e0 (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, name=bridge, type=bridge) 2021-02-15T18:50:11.526407799+01:00 volume mount myvolume (container=45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1, destination=/foo, driver=local, propagation=, read/write=true) 2021-02-15T18:50:11.864134043+01:00 container start 45112157c8b1382626bf5e01ef18445a4c680f3846c5e32d01775dddee8ca6d1 (image=busybox, name=gracious_hypatia) 5. Notice that a "volume create" event is created twice; - once when `docker volume create` was run - once when `docker run ...` was run This patch moves the generation of (most) events to the volume _store_, and only generates an event if the volume did not yet exist. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> ``` Fix multiple "volume create" events being created when using an existing volume ``` **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-15 17:54:37+00:00
2021-06-09 10:01:22+00:00
volume/service/store.go
package service // import "github.com/docker/docker/volume/service"

import (
	"context"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"

	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/volume"
	"github.com/docker/docker/volume/drivers"
	volumemounts "github.com/docker/docker/volume/mounts"
	"github.com/docker/docker/volume/service/opts"
	"github.com/moby/locker"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	bolt "go.etcd.io/bbolt"
)

const (
	// volumeDataDir is the subdirectory (under the store's root path) that
	// holds the volume metadata database.
	volumeDataDir = "volumes"
)

// volumeWrapper decorates a driver-provided volume.Volume with the labels,
// options, and scope that the store tracks on the driver's behalf.
type volumeWrapper struct {
	volume.Volume
	labels  map[string]string
	scope   string
	options map[string]string
}

// Options returns a copy of the volume's creation options; the copy prevents
// callers from mutating the store's internal map.
func (v volumeWrapper) Options() map[string]string {
	if v.options == nil {
		return nil
	}
	options := make(map[string]string, len(v.options))
	for key, value := range v.options {
		options[key] = value
	}
	return options
}

// Labels returns a copy of the volume's labels; the copy prevents callers
// from mutating the store's internal map.
func (v volumeWrapper) Labels() map[string]string {
	if v.labels == nil {
		return nil
	}

	labels := make(map[string]string, len(v.labels))
	for key, value := range v.labels {
		labels[key] = value
	}
	return labels
}

// Scope returns the scope recorded for this volume.
func (v volumeWrapper) Scope() string {
	return v.scope
}

// CachedPath returns the wrapped volume's cached path when it exposes one,
// falling back to Path() otherwise.
func (v volumeWrapper) CachedPath() string {
	if vv, ok := v.Volume.(interface {
		CachedPath() string
	}); ok {
		return vv.CachedPath()
	}
	return v.Volume.Path()
}

// NewStore creates a new volume store at the given path
func NewStore(rootPath string, drivers *drivers.Store) (*VolumeStore, error) {
	vs := &VolumeStore{
		locks:   &locker.Locker{},
		names:   make(map[string]volume.Volume),
		refs:    make(map[string]map[string]struct{}),
		labels:  make(map[string]map[string]string),
		options: make(map[string]map[string]string),
		drivers: drivers,
	}

	// An empty rootPath means the store runs without persistent metadata
	// (vs.db stays nil).
	if rootPath != "" {
		// initialize metadata store
		volPath := filepath.Join(rootPath, volumeDataDir)
		if err := os.MkdirAll(volPath, 0750); err != nil {
			return nil, err
		}

		var err error
		// 1s timeout avoids blocking forever when another process holds the
		// bolt file lock.
		vs.db, err = bolt.Open(filepath.Join(volPath, "metadata.db"), 0600, &bolt.Options{Timeout: 1 * time.Second})
		if err != nil {
			return nil, errors.Wrap(err, "error while opening volume store metadata database")
		}

		// initialize volumes bucket
		if err := vs.db.Update(func(tx *bolt.Tx) error {
			if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil {
				return errors.Wrap(err, "error while setting up volume store metadata database")
			}
			return nil
		}); err != nil {
			return nil, err
		}
	}

	// Repopulate the in-memory caches from the metadata database.
	vs.restore()

	return vs, nil
}

// getNamed looks up a volume in the in-memory name cache.
func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) {
	s.globalLock.RLock()
	v, exists := s.names[name]
	s.globalLock.RUnlock()
	return v, exists
}

// setNamed records the volume in the name cache and, when ref is non-empty,
// registers ref as a user of the volume.
func (s *VolumeStore) setNamed(v volume.Volume, ref string) {
	name := v.Name()
	s.globalLock.Lock()
	s.names[name] = v
	if len(ref) > 0 {
		if s.refs[name] == nil {
			s.refs[name] = make(map[string]struct{})
		}
		s.refs[name][ref] = struct{}{}
	}
	s.globalLock.Unlock()
}

// hasRef returns true if the given name has at least one ref.
// Callers of this function are expected to hold the name lock.
func (s *VolumeStore) hasRef(name string) bool {
	s.globalLock.RLock()
	l := len(s.refs[name])
	s.globalLock.RUnlock()
	return l > 0
}

// getRefs gets the list of refs for a given name
// Callers of this function are expected to hold the name lock.
func (s *VolumeStore) getRefs(name string) []string {
	s.globalLock.RLock()
	defer s.globalLock.RUnlock()

	refs := make([]string, 0, len(s.refs[name]))
	for r := range s.refs[name] {
		refs = append(refs, r)
	}

	return refs
}

// purge allows the cleanup of internal data on docker in case
// the internal data is out of sync with volumes driver plugins.
func (s *VolumeStore) purge(ctx context.Context, name string) error { s.globalLock.Lock() defer s.globalLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } v, exists := s.names[name] if exists { driverName := v.DriverName() if _, err := s.drivers.ReleaseDriver(driverName); err != nil { logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } } if err := s.removeMeta(name); err != nil { logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) } delete(s.names, name) delete(s.refs, name) delete(s.labels, name) delete(s.options, name) return nil } // VolumeStore is responsible for storing and reference counting volumes. type VolumeStore struct { // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. locks *locker.Locker drivers *drivers.Store // globalLock is used to protect access to mutable structures used by the store object globalLock sync.RWMutex // names stores the volume name -> volume relationship. 
// This is used for making lookups faster so we don't have to probe all drivers names map[string]volume.Volume // refs stores the volume name and the list of things referencing it refs map[string]map[string]struct{} // labels stores volume labels for each volume labels map[string]map[string]string // options stores volume options for each volume options map[string]map[string]string db *bolt.DB } func filterByDriver(names []string) filterFunc { return func(v volume.Volume) bool { for _, name := range names { if name == v.DriverName() { return true } } return false } } func (s *VolumeStore) byReferenced(referenced bool) filterFunc { return func(v volume.Volume) bool { return s.hasRef(v.Name()) == referenced } } func (s *VolumeStore) filter(ctx context.Context, vols *[]volume.Volume, by By) (warnings []string, err error) { // note that this specifically does not support the `FromList` By type. switch f := by.(type) { case nil: if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return warnings, err } *vols = ls } case byDriver: if *vols != nil { filter(vols, filterByDriver([]string(f))) return nil, nil } var ls []volume.Volume ls, warnings, err = s.list(ctx, []string(f)...) if err != nil { return nil, err } *vols = ls case ByReferenced: // TODO(@cpuguy83): It would be nice to optimize this by looking at the list // of referenced volumes, however the locking strategy makes this difficult // without either providing inconsistent data or deadlocks. if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return nil, err } *vols = ls } filter(vols, s.byReferenced(bool(f))) case andCombinator: for _, by := range f { w, err := s.filter(ctx, vols, by) if err != nil { return warnings, err } warnings = append(warnings, w...) 
} case orCombinator: for _, by := range f { switch by.(type) { case byDriver: var ls []volume.Volume w, err := s.filter(ctx, &ls, by) if err != nil { return warnings, err } warnings = append(warnings, w...) default: ls, w, err := s.list(ctx) if err != nil { return warnings, err } warnings = append(warnings, w...) w, err = s.filter(ctx, &ls, by) if err != nil { return warnings, err } warnings = append(warnings, w...) *vols = append(*vols, ls...) } } unique(vols) case CustomFilter: if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return nil, err } *vols = ls } filter(vols, filterFunc(f)) default: return nil, errdefs.InvalidParameter(errors.Errorf("unsupported filter: %T", f)) } return warnings, nil } func unique(ls *[]volume.Volume) { names := make(map[string]bool, len(*ls)) filter(ls, func(v volume.Volume) bool { if names[v.Name()] { return false } names[v.Name()] = true return true }) } // Find lists volumes filtered by the past in filter. // If a driver returns a volume that has name which conflicts with another volume from a different driver, // the first volume is chosen and the conflicting volume is dropped. func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, warnings []string, err error) { logrus.WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") switch f := by.(type) { case nil, orCombinator, andCombinator, byDriver, ByReferenced, CustomFilter: warnings, err = s.filter(ctx, &vols, by) case fromList: warnings, err = s.filter(ctx, f.ls, f.by) default: // Really shouldn't be possible, but makes sure that any new By's are added to this check. 
err = errdefs.InvalidParameter(errors.Errorf("unsupported filter type: %T", f)) } if err != nil { return nil, nil, &OpErr{Err: err, Op: "list"} } var out []volume.Volume for _, v := range vols { name := normalizeVolumeName(v.Name()) s.locks.Lock(name) storedV, exists := s.getNamed(name) // Note: it's not safe to populate the cache here because the volume may have been // deleted before we acquire a lock on its name if exists && storedV.DriverName() != v.DriverName() { logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) s.locks.Unlock(v.Name()) continue } out = append(out, v) s.locks.Unlock(v.Name()) } return out, warnings, nil } type filterFunc func(volume.Volume) bool func filter(vols *[]volume.Volume, fn filterFunc) { var evict []int for i, v := range *vols { if !fn(v) { evict = append(evict, i) } } for n, i := range evict { copy((*vols)[i-n:], (*vols)[i-n+1:]) (*vols)[len(*vols)-1] = nil *vols = (*vols)[:len(*vols)-1] } } // list goes through each volume driver and asks for its list of volumes. 
// TODO(@cpuguy83): plumb context through func (s *VolumeStore) list(ctx context.Context, driverNames ...string) ([]volume.Volume, []string, error) { var ( ls = []volume.Volume{} // do not return a nil value as this affects filtering warnings []string ) var dls []volume.Driver all, err := s.drivers.GetAllDrivers() if err != nil { return nil, nil, err } if len(driverNames) == 0 { dls = all } else { idx := make(map[string]bool, len(driverNames)) for _, name := range driverNames { idx[name] = true } for _, d := range all { if idx[d.Name()] { dls = append(dls, d) } } } type vols struct { vols []volume.Volume err error driverName string } chVols := make(chan vols, len(dls)) for _, vd := range dls { go func(d volume.Driver) { vs, err := d.List() if err != nil { chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} return } for i, v := range vs { s.globalLock.RLock() vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} s.globalLock.RUnlock() } chVols <- vols{vols: vs} }(vd) } badDrivers := make(map[string]struct{}) for i := 0; i < len(dls); i++ { vs := <-chVols if vs.err != nil { warnings = append(warnings, vs.err.Error()) badDrivers[vs.driverName] = struct{}{} } ls = append(ls, vs.vols...) } if len(badDrivers) > 0 { s.globalLock.RLock() for _, v := range s.names { if _, exists := badDrivers[v.DriverName()]; exists { ls = append(ls, v) } } s.globalLock.RUnlock() } return ls, warnings, nil } // Create creates a volume with the given name and driver // If the volume needs to be created with a reference to prevent race conditions // with volume cleanup, make sure to use the `CreateWithReference` option. 
func (s *VolumeStore) Create(ctx context.Context, name, driverName string, createOpts ...opts.CreateOption) (volume.Volume, error) { var cfg opts.CreateConfig for _, o := range createOpts { o(&cfg) } name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return nil, ctx.Err() default: } v, err := s.create(ctx, name, driverName, cfg.Options, cfg.Labels) if err != nil { if _, ok := err.(*OpErr); ok { return nil, err } return nil, &OpErr{Err: err, Name: name, Op: "create"} } s.setNamed(v, cfg.Reference) return v, nil } // checkConflict checks the local cache for name collisions with the passed in name, // for existing volumes with the same name but in a different driver. // This is used by `Create` as a best effort to prevent name collisions for volumes. // If a matching volume is found that is not a conflict that is returned so the caller // does not need to perform an additional lookup. // When no matching volume is found, both returns will be nil // // Note: This does not probe all the drivers for name collisions because v1 plugins // are very slow, particularly if the plugin is down, and cause other issues, // particularly around locking the store. // TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially // use a connect timeout for this kind of check to ensure we aren't blocking for a // long time. func (s *VolumeStore) checkConflict(ctx context.Context, name, driverName string) (volume.Volume, error) { // check the local cache v, _ := s.getNamed(name) if v == nil { return nil, nil } vDriverName := v.DriverName() var conflict bool if driverName != "" { // Retrieve canonical driver name to avoid inconsistencies (for example // "plugin" vs. 
"plugin:latest") vd, err := s.drivers.GetDriver(driverName) if err != nil { return nil, err } if vDriverName != vd.Name() { conflict = true } } // let's check if the found volume ref // is stale by checking with the driver if it still exists exists, err := volumeExists(ctx, s.drivers, v) if err != nil { return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) } if exists { if conflict { return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) } return v, nil } if s.hasRef(v.Name()) { // Containers are referencing this volume but it doesn't seem to exist anywhere. // Return a conflict error here, the user can fix this with `docker volume rm -f` return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) } // doesn't exist, so purge it from the cache s.purge(ctx, name) return nil, nil } // volumeExists returns if the volume is still present in the driver. // An error is returned if there was an issue communicating with the driver. func volumeExists(ctx context.Context, store *drivers.Store, v volume.Volume) (bool, error) { exists, err := lookupVolume(ctx, store, v.DriverName(), v.Name()) if err != nil { return false, err } return exists != nil, nil } // create asks the given driver to create a volume with the name/opts. // If a volume with the name is already known, it will ask the stored driver for the volume. // If the passed in driver name does not match the driver name which is stored // for the given volume name, an error is returned after checking if the reference is stale. // If the reference is stale, it will be purged and this create can continue. 
// It is expected that callers of this function hold any necessary locks. func (s *VolumeStore) create(ctx context.Context, name, driverName string, opts, labels map[string]string) (volume.Volume, error) { // Validate the name in a platform-specific manner // volume name validation is specific to the host os and not on container image // windows/lcow should have an equivalent volumename validation logic so we create a parser for current host OS parser := volumemounts.NewParser(runtime.GOOS) err := parser.ValidateVolumeName(name) if err != nil { return nil, err } v, err := s.checkConflict(ctx, name, driverName) if err != nil { return nil, err } if v != nil { // there is an existing volume, if we already have this stored locally, return it. // TODO: there could be some inconsistent details such as labels here if vv, _ := s.getNamed(v.Name()); vv != nil { return vv, nil } } // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name if driverName == "" { v, _ = s.getVolume(ctx, name, "") if v != nil { return v, nil } } if driverName == "" { driverName = volume.DefaultDriverName } vd, err := s.drivers.CreateDriver(driverName) if err != nil { return nil, &OpErr{Op: "create", Name: name, Err: err} } logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) if v, _ = vd.Get(name); v == nil { v, err = vd.Create(name, opts) if err != nil { if _, err := s.drivers.ReleaseDriver(driverName); err != nil { logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } return nil, err } } s.globalLock.Lock() s.labels[name] = labels s.options[name] = opts s.refs[name] = make(map[string]struct{}) s.globalLock.Unlock() metadata := volumeMetadata{ Name: name, Driver: vd.Name(), Labels: labels, Options: opts, } if err := s.setMeta(name, metadata); err != nil { return nil, err } return volumeWrapper{v, labels, vd.Scope(), opts}, nil } // Get looks if a volume with 
the given name exists and returns it if so func (s *VolumeStore) Get(ctx context.Context, name string, getOptions ...opts.GetOption) (volume.Volume, error) { var cfg opts.GetConfig for _, o := range getOptions { o(&cfg) } name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) v, err := s.getVolume(ctx, name, cfg.Driver) if err != nil { return nil, &OpErr{Err: err, Name: name, Op: "get"} } if cfg.Driver != "" && v.DriverName() != cfg.Driver { return nil, &OpErr{Name: name, Op: "get", Err: errdefs.Conflict(errors.New("found volume driver does not match passed in driver"))} } s.setNamed(v, cfg.Reference) return v, nil } // getVolume requests the volume, if the driver info is stored it just accesses that driver, // if the driver is unknown it probes all drivers until it finds the first volume with that name. // it is expected that callers of this function hold any necessary locks func (s *VolumeStore) getVolume(ctx context.Context, name, driverName string) (volume.Volume, error) { var meta volumeMetadata meta, err := s.getMeta(name) if err != nil { return nil, err } if driverName != "" { if meta.Driver == "" { meta.Driver = driverName } if driverName != meta.Driver { return nil, errdefs.Conflict(errors.New("provided volume driver does not match stored driver")) } } if driverName == "" { driverName = meta.Driver } if driverName == "" { s.globalLock.RLock() select { case <-ctx.Done(): s.globalLock.RUnlock() return nil, ctx.Err() default: } v, exists := s.names[name] s.globalLock.RUnlock() if exists { meta.Driver = v.DriverName() if err := s.setMeta(name, meta); err != nil { return nil, err } } } if meta.Driver != "" { vol, err := lookupVolume(ctx, s.drivers, meta.Driver, name) if err != nil { return nil, err } if vol == nil { s.purge(ctx, name) return nil, errNoSuchVolume } var scope string vd, err := s.drivers.GetDriver(meta.Driver) if err == nil { scope = vd.Scope() } return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil } 
logrus.Debugf("Probing all drivers for volume with name: %s", name) drivers, err := s.drivers.GetAllDrivers() if err != nil { return nil, err } for _, d := range drivers { select { case <-ctx.Done(): return nil, ctx.Err() default: } v, err := d.Get(name) if err != nil || v == nil { continue } meta.Driver = v.DriverName() if err := s.setMeta(name, meta); err != nil { return nil, err } return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil } return nil, errNoSuchVolume } // lookupVolume gets the specified volume from the specified driver. // This will only return errors related to communications with the driver. // If the driver returns an error that is not communication related the // error is logged but not returned. // If the volume is not found it will return `nil, nil`` // TODO(@cpuguy83): plumb through the context to lower level components func lookupVolume(ctx context.Context, store *drivers.Store, driverName, volumeName string) (volume.Volume, error) { if driverName == "" { driverName = volume.DefaultDriverName } vd, err := store.GetDriver(driverName) if err != nil { return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } v, err := vd.Get(volumeName) if err != nil { var nErr net.Error if errors.As(err, &nErr) { if v != nil { volumeName = v.Name() driverName = v.DriverName() } return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } // At this point, the error could be anything from the driver, such as "no such volume" // Let's not check an error here, and instead check if the driver returned a volume logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") } return v, nil } // Remove removes the requested volume. 
A volume is not removed if it has any refs func (s *VolumeStore) Remove(ctx context.Context, v volume.Volume, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } name := v.Name() s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return ctx.Err() default: } if s.hasRef(name) { return &OpErr{Err: errVolumeInUse, Name: name, Op: "remove", Refs: s.getRefs(name)} } v, err := s.getVolume(ctx, name, v.DriverName()) if err != nil { return err } vd, err := s.drivers.GetDriver(v.DriverName()) if err != nil { return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} } logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) vol := unwrapVolume(v) err = vd.Remove(vol) if err != nil { err = &OpErr{Err: err, Name: name, Op: "remove"} } if err == nil || cfg.PurgeOnError { if e := s.purge(ctx, name); e != nil && err == nil { err = e } } return err } // Release releases the specified reference to the volume func (s *VolumeStore) Release(ctx context.Context, name string, ref string) error { s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return ctx.Err() default: } s.globalLock.Lock() defer s.globalLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } if s.refs[name] != nil { delete(s.refs[name], ref) } return nil } // CountReferences gives a count of all references for a given volume. func (s *VolumeStore) CountReferences(v volume.Volume) int { name := normalizeVolumeName(v.Name()) s.locks.Lock(name) defer s.locks.Unlock(name) s.globalLock.Lock() defer s.globalLock.Unlock() return len(s.refs[name]) } func unwrapVolume(v volume.Volume) volume.Volume { if vol, ok := v.(volumeWrapper); ok { return vol.Volume } return v } // Shutdown releases all resources used by the volume store // It does not make any changes to volumes, drivers, etc. func (s *VolumeStore) Shutdown() error { return s.db.Close() }
package service // import "github.com/docker/docker/volume/service" import ( "context" "fmt" "net" "os" "path/filepath" "runtime" "sync" "time" "github.com/docker/docker/errdefs" "github.com/docker/docker/volume" "github.com/docker/docker/volume/drivers" volumemounts "github.com/docker/docker/volume/mounts" "github.com/docker/docker/volume/service/opts" "github.com/moby/locker" "github.com/pkg/errors" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) const ( volumeDataDir = "volumes" ) type volumeWrapper struct { volume.Volume labels map[string]string scope string options map[string]string } func (v volumeWrapper) Options() map[string]string { if v.options == nil { return nil } options := make(map[string]string, len(v.options)) for key, value := range v.options { options[key] = value } return options } func (v volumeWrapper) Labels() map[string]string { if v.labels == nil { return nil } labels := make(map[string]string, len(v.labels)) for key, value := range v.labels { labels[key] = value } return labels } func (v volumeWrapper) Scope() string { return v.scope } func (v volumeWrapper) CachedPath() string { if vv, ok := v.Volume.(interface { CachedPath() string }); ok { return vv.CachedPath() } return v.Volume.Path() } // StoreOpt sets options for a VolumeStore type StoreOpt func(store *VolumeStore) error // NewStore creates a new volume store at the given path func NewStore(rootPath string, drivers *drivers.Store, opts ...StoreOpt) (*VolumeStore, error) { vs := &VolumeStore{ locks: &locker.Locker{}, names: make(map[string]volume.Volume), refs: make(map[string]map[string]struct{}), labels: make(map[string]map[string]string), options: make(map[string]map[string]string), drivers: drivers, } for _, o := range opts { if err := o(vs); err != nil { return nil, err } } if rootPath != "" { // initialize metadata store volPath := filepath.Join(rootPath, volumeDataDir) if err := os.MkdirAll(volPath, 0750); err != nil { return nil, err } var err error vs.db, err = 
bolt.Open(filepath.Join(volPath, "metadata.db"), 0600, &bolt.Options{Timeout: 1 * time.Second}) if err != nil { return nil, errors.Wrap(err, "error while opening volume store metadata database") } // initialize volumes bucket if err := vs.db.Update(func(tx *bolt.Tx) error { if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil { return errors.Wrap(err, "error while setting up volume store metadata database") } return nil }); err != nil { return nil, err } } vs.restore() return vs, nil } // WithEventLogger configures the VolumeStore with the given VolumeEventLogger func WithEventLogger(logger VolumeEventLogger) StoreOpt { return func(store *VolumeStore) error { store.eventLogger = logger return nil } } func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { s.globalLock.RLock() v, exists := s.names[name] s.globalLock.RUnlock() return v, exists } func (s *VolumeStore) setNamed(v volume.Volume, ref string) { name := v.Name() s.globalLock.Lock() s.names[name] = v if len(ref) > 0 { if s.refs[name] == nil { s.refs[name] = make(map[string]struct{}) } s.refs[name][ref] = struct{}{} } s.globalLock.Unlock() } // hasRef returns true if the given name has at least one ref. // Callers of this function are expected to hold the name lock. func (s *VolumeStore) hasRef(name string) bool { s.globalLock.RLock() l := len(s.refs[name]) s.globalLock.RUnlock() return l > 0 } // getRefs gets the list of refs for a given name // Callers of this function are expected to hold the name lock. func (s *VolumeStore) getRefs(name string) []string { s.globalLock.RLock() defer s.globalLock.RUnlock() refs := make([]string, 0, len(s.refs[name])) for r := range s.refs[name] { refs = append(refs, r) } return refs } // purge allows the cleanup of internal data on docker in case // the internal data is out of sync with volumes driver plugins. 
func (s *VolumeStore) purge(ctx context.Context, name string) error { s.globalLock.Lock() defer s.globalLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } v, exists := s.names[name] if exists { driverName := v.DriverName() if _, err := s.drivers.ReleaseDriver(driverName); err != nil { logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } } if err := s.removeMeta(name); err != nil { logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) } delete(s.names, name) delete(s.refs, name) delete(s.labels, name) delete(s.options, name) return nil } // VolumeStore is responsible for storing and reference counting volumes. type VolumeStore struct { // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. locks *locker.Locker drivers *drivers.Store // globalLock is used to protect access to mutable structures used by the store object globalLock sync.RWMutex // names stores the volume name -> volume relationship. 
// This is used for making lookups faster so we don't have to probe all drivers names map[string]volume.Volume // refs stores the volume name and the list of things referencing it refs map[string]map[string]struct{} // labels stores volume labels for each volume labels map[string]map[string]string // options stores volume options for each volume options map[string]map[string]string db *bolt.DB eventLogger VolumeEventLogger } func filterByDriver(names []string) filterFunc { return func(v volume.Volume) bool { for _, name := range names { if name == v.DriverName() { return true } } return false } } func (s *VolumeStore) byReferenced(referenced bool) filterFunc { return func(v volume.Volume) bool { return s.hasRef(v.Name()) == referenced } } func (s *VolumeStore) filter(ctx context.Context, vols *[]volume.Volume, by By) (warnings []string, err error) { // note that this specifically does not support the `FromList` By type. switch f := by.(type) { case nil: if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return warnings, err } *vols = ls } case byDriver: if *vols != nil { filter(vols, filterByDriver([]string(f))) return nil, nil } var ls []volume.Volume ls, warnings, err = s.list(ctx, []string(f)...) if err != nil { return nil, err } *vols = ls case ByReferenced: // TODO(@cpuguy83): It would be nice to optimize this by looking at the list // of referenced volumes, however the locking strategy makes this difficult // without either providing inconsistent data or deadlocks. if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return nil, err } *vols = ls } filter(vols, s.byReferenced(bool(f))) case andCombinator: for _, by := range f { w, err := s.filter(ctx, vols, by) if err != nil { return warnings, err } warnings = append(warnings, w...) 
} case orCombinator: for _, by := range f { switch by.(type) { case byDriver: var ls []volume.Volume w, err := s.filter(ctx, &ls, by) if err != nil { return warnings, err } warnings = append(warnings, w...) default: ls, w, err := s.list(ctx) if err != nil { return warnings, err } warnings = append(warnings, w...) w, err = s.filter(ctx, &ls, by) if err != nil { return warnings, err } warnings = append(warnings, w...) *vols = append(*vols, ls...) } } unique(vols) case CustomFilter: if *vols == nil { var ls []volume.Volume ls, warnings, err = s.list(ctx) if err != nil { return nil, err } *vols = ls } filter(vols, filterFunc(f)) default: return nil, errdefs.InvalidParameter(errors.Errorf("unsupported filter: %T", f)) } return warnings, nil } func unique(ls *[]volume.Volume) { names := make(map[string]bool, len(*ls)) filter(ls, func(v volume.Volume) bool { if names[v.Name()] { return false } names[v.Name()] = true return true }) } // Find lists volumes filtered by the past in filter. // If a driver returns a volume that has name which conflicts with another volume from a different driver, // the first volume is chosen and the conflicting volume is dropped. func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, warnings []string, err error) { logrus.WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") switch f := by.(type) { case nil, orCombinator, andCombinator, byDriver, ByReferenced, CustomFilter: warnings, err = s.filter(ctx, &vols, by) case fromList: warnings, err = s.filter(ctx, f.ls, f.by) default: // Really shouldn't be possible, but makes sure that any new By's are added to this check. 
err = errdefs.InvalidParameter(errors.Errorf("unsupported filter type: %T", f)) } if err != nil { return nil, nil, &OpErr{Err: err, Op: "list"} } var out []volume.Volume for _, v := range vols { name := normalizeVolumeName(v.Name()) s.locks.Lock(name) storedV, exists := s.getNamed(name) // Note: it's not safe to populate the cache here because the volume may have been // deleted before we acquire a lock on its name if exists && storedV.DriverName() != v.DriverName() { logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) s.locks.Unlock(v.Name()) continue } out = append(out, v) s.locks.Unlock(v.Name()) } return out, warnings, nil } type filterFunc func(volume.Volume) bool func filter(vols *[]volume.Volume, fn filterFunc) { var evict []int for i, v := range *vols { if !fn(v) { evict = append(evict, i) } } for n, i := range evict { copy((*vols)[i-n:], (*vols)[i-n+1:]) (*vols)[len(*vols)-1] = nil *vols = (*vols)[:len(*vols)-1] } } // list goes through each volume driver and asks for its list of volumes. 
// TODO(@cpuguy83): plumb context through func (s *VolumeStore) list(ctx context.Context, driverNames ...string) ([]volume.Volume, []string, error) { var ( ls = []volume.Volume{} // do not return a nil value as this affects filtering warnings []string ) var dls []volume.Driver all, err := s.drivers.GetAllDrivers() if err != nil { return nil, nil, err } if len(driverNames) == 0 { dls = all } else { idx := make(map[string]bool, len(driverNames)) for _, name := range driverNames { idx[name] = true } for _, d := range all { if idx[d.Name()] { dls = append(dls, d) } } } type vols struct { vols []volume.Volume err error driverName string } chVols := make(chan vols, len(dls)) for _, vd := range dls { go func(d volume.Driver) { vs, err := d.List() if err != nil { chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} return } for i, v := range vs { s.globalLock.RLock() vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} s.globalLock.RUnlock() } chVols <- vols{vols: vs} }(vd) } badDrivers := make(map[string]struct{}) for i := 0; i < len(dls); i++ { vs := <-chVols if vs.err != nil { warnings = append(warnings, vs.err.Error()) badDrivers[vs.driverName] = struct{}{} } ls = append(ls, vs.vols...) } if len(badDrivers) > 0 { s.globalLock.RLock() for _, v := range s.names { if _, exists := badDrivers[v.DriverName()]; exists { ls = append(ls, v) } } s.globalLock.RUnlock() } return ls, warnings, nil } // Create creates a volume with the given name and driver // If the volume needs to be created with a reference to prevent race conditions // with volume cleanup, make sure to use the `CreateWithReference` option. 
func (s *VolumeStore) Create(ctx context.Context, name, driverName string, createOpts ...opts.CreateOption) (volume.Volume, error) { var cfg opts.CreateConfig for _, o := range createOpts { o(&cfg) } name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return nil, ctx.Err() default: } v, created, err := s.create(ctx, name, driverName, cfg.Options, cfg.Labels) if err != nil { if _, ok := err.(*OpErr); ok { return nil, err } return nil, &OpErr{Err: err, Name: name, Op: "create"} } if created && s.eventLogger != nil { s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) } s.setNamed(v, cfg.Reference) return v, nil } // checkConflict checks the local cache for name collisions with the passed in name, // for existing volumes with the same name but in a different driver. // This is used by `Create` as a best effort to prevent name collisions for volumes. // If a matching volume is found that is not a conflict that is returned so the caller // does not need to perform an additional lookup. // When no matching volume is found, both returns will be nil // // Note: This does not probe all the drivers for name collisions because v1 plugins // are very slow, particularly if the plugin is down, and cause other issues, // particularly around locking the store. // TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially // use a connect timeout for this kind of check to ensure we aren't blocking for a // long time. func (s *VolumeStore) checkConflict(ctx context.Context, name, driverName string) (volume.Volume, error) { // check the local cache v, _ := s.getNamed(name) if v == nil { return nil, nil } vDriverName := v.DriverName() var conflict bool if driverName != "" { // Retrieve canonical driver name to avoid inconsistencies (for example // "plugin" vs. 
"plugin:latest") vd, err := s.drivers.GetDriver(driverName) if err != nil { return nil, err } if vDriverName != vd.Name() { conflict = true } } // let's check if the found volume ref // is stale by checking with the driver if it still exists exists, err := volumeExists(ctx, s.drivers, v) if err != nil { return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) } if exists { if conflict { return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) } return v, nil } if s.hasRef(v.Name()) { // Containers are referencing this volume but it doesn't seem to exist anywhere. // Return a conflict error here, the user can fix this with `docker volume rm -f` return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) } // doesn't exist, so purge it from the cache s.purge(ctx, name) return nil, nil } // volumeExists returns if the volume is still present in the driver. // An error is returned if there was an issue communicating with the driver. func volumeExists(ctx context.Context, store *drivers.Store, v volume.Volume) (bool, error) { exists, err := lookupVolume(ctx, store, v.DriverName(), v.Name()) if err != nil { return false, err } return exists != nil, nil } // create asks the given driver to create a volume with the name/opts. // If a volume with the name is already known, it will ask the stored driver for the volume. // If the passed in driver name does not match the driver name which is stored // for the given volume name, an error is returned after checking if the reference is stale. // If the reference is stale, it will be purged and this create can continue. 
// It is expected that callers of this function hold any necessary locks. func (s *VolumeStore) create(ctx context.Context, name, driverName string, opts, labels map[string]string) (volume.Volume, bool, error) { // Validate the name in a platform-specific manner // volume name validation is specific to the host os and not on container image // windows/lcow should have an equivalent volumename validation logic so we create a parser for current host OS parser := volumemounts.NewParser(runtime.GOOS) err := parser.ValidateVolumeName(name) if err != nil { return nil, false, err } v, err := s.checkConflict(ctx, name, driverName) if err != nil { return nil, false, err } if v != nil { // there is an existing volume, if we already have this stored locally, return it. // TODO: there could be some inconsistent details such as labels here if vv, _ := s.getNamed(v.Name()); vv != nil { return vv, false, nil } } // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name if driverName == "" { v, _ = s.getVolume(ctx, name, "") if v != nil { return v, false, nil } } if driverName == "" { driverName = volume.DefaultDriverName } vd, err := s.drivers.CreateDriver(driverName) if err != nil { return nil, false, &OpErr{Op: "create", Name: name, Err: err} } logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) if v, _ = vd.Get(name); v == nil { v, err = vd.Create(name, opts) if err != nil { if _, err := s.drivers.ReleaseDriver(driverName); err != nil { logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") } return nil, false, err } } s.globalLock.Lock() s.labels[name] = labels s.options[name] = opts s.refs[name] = make(map[string]struct{}) s.globalLock.Unlock() metadata := volumeMetadata{ Name: name, Driver: vd.Name(), Labels: labels, Options: opts, } if err := s.setMeta(name, metadata); err != nil { return nil, true, err } return volumeWrapper{v, labels, 
vd.Scope(), opts}, true, nil } // Get looks if a volume with the given name exists and returns it if so func (s *VolumeStore) Get(ctx context.Context, name string, getOptions ...opts.GetOption) (volume.Volume, error) { var cfg opts.GetConfig for _, o := range getOptions { o(&cfg) } name = normalizeVolumeName(name) s.locks.Lock(name) defer s.locks.Unlock(name) v, err := s.getVolume(ctx, name, cfg.Driver) if err != nil { return nil, &OpErr{Err: err, Name: name, Op: "get"} } if cfg.Driver != "" && v.DriverName() != cfg.Driver { return nil, &OpErr{Name: name, Op: "get", Err: errdefs.Conflict(errors.New("found volume driver does not match passed in driver"))} } s.setNamed(v, cfg.Reference) return v, nil } // getVolume requests the volume, if the driver info is stored it just accesses that driver, // if the driver is unknown it probes all drivers until it finds the first volume with that name. // it is expected that callers of this function hold any necessary locks func (s *VolumeStore) getVolume(ctx context.Context, name, driverName string) (volume.Volume, error) { var meta volumeMetadata meta, err := s.getMeta(name) if err != nil { return nil, err } if driverName != "" { if meta.Driver == "" { meta.Driver = driverName } if driverName != meta.Driver { return nil, errdefs.Conflict(errors.New("provided volume driver does not match stored driver")) } } if driverName == "" { driverName = meta.Driver } if driverName == "" { s.globalLock.RLock() select { case <-ctx.Done(): s.globalLock.RUnlock() return nil, ctx.Err() default: } v, exists := s.names[name] s.globalLock.RUnlock() if exists { meta.Driver = v.DriverName() if err := s.setMeta(name, meta); err != nil { return nil, err } } } if meta.Driver != "" { vol, err := lookupVolume(ctx, s.drivers, meta.Driver, name) if err != nil { return nil, err } if vol == nil { s.purge(ctx, name) return nil, errNoSuchVolume } var scope string vd, err := s.drivers.GetDriver(meta.Driver) if err == nil { scope = vd.Scope() } return 
volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil } logrus.Debugf("Probing all drivers for volume with name: %s", name) drivers, err := s.drivers.GetAllDrivers() if err != nil { return nil, err } for _, d := range drivers { select { case <-ctx.Done(): return nil, ctx.Err() default: } v, err := d.Get(name) if err != nil || v == nil { continue } meta.Driver = v.DriverName() if err := s.setMeta(name, meta); err != nil { return nil, err } return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil } return nil, errNoSuchVolume } // lookupVolume gets the specified volume from the specified driver. // This will only return errors related to communications with the driver. // If the driver returns an error that is not communication related the // error is logged but not returned. // If the volume is not found it will return `nil, nil`` // TODO(@cpuguy83): plumb through the context to lower level components func lookupVolume(ctx context.Context, store *drivers.Store, driverName, volumeName string) (volume.Volume, error) { if driverName == "" { driverName = volume.DefaultDriverName } vd, err := store.GetDriver(driverName) if err != nil { return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } v, err := vd.Get(volumeName) if err != nil { var nErr net.Error if errors.As(err, &nErr) { if v != nil { volumeName = v.Name() driverName = v.DriverName() } return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) } // At this point, the error could be anything from the driver, such as "no such volume" // Let's not check an error here, and instead check if the driver returned a volume logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") } return v, nil } // Remove removes the requested volume. 
A volume is not removed if it has any refs func (s *VolumeStore) Remove(ctx context.Context, v volume.Volume, rmOpts ...opts.RemoveOption) error { var cfg opts.RemoveConfig for _, o := range rmOpts { o(&cfg) } name := v.Name() s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return ctx.Err() default: } if s.hasRef(name) { return &OpErr{Err: errVolumeInUse, Name: name, Op: "remove", Refs: s.getRefs(name)} } v, err := s.getVolume(ctx, name, v.DriverName()) if err != nil { return err } vd, err := s.drivers.GetDriver(v.DriverName()) if err != nil { return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} } logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) vol := unwrapVolume(v) err = vd.Remove(vol) if err != nil { err = &OpErr{Err: err, Name: name, Op: "remove"} } if err == nil || cfg.PurgeOnError { if e := s.purge(ctx, name); e != nil && err == nil { err = e } } if err == nil && s.eventLogger != nil { s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) } return err } // Release releases the specified reference to the volume func (s *VolumeStore) Release(ctx context.Context, name string, ref string) error { s.locks.Lock(name) defer s.locks.Unlock(name) select { case <-ctx.Done(): return ctx.Err() default: } s.globalLock.Lock() defer s.globalLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } if s.refs[name] != nil { delete(s.refs[name], ref) } return nil } // CountReferences gives a count of all references for a given volume. 
func (s *VolumeStore) CountReferences(v volume.Volume) int { name := normalizeVolumeName(v.Name()) s.locks.Lock(name) defer s.locks.Unlock(name) s.globalLock.Lock() defer s.globalLock.Unlock() return len(s.refs[name]) } func unwrapVolume(v volume.Volume) volume.Volume { if vol, ok := v.(volumeWrapper); ok { return vol.Volume } return v } // Shutdown releases all resources used by the volume store // It does not make any changes to volumes, drivers, etc. func (s *VolumeStore) Shutdown() error { return s.db.Close() }
thaJeztah
4c2ec79bf2a848761bc9162a25b08ec553fc69db
ef4d473401421d26633d43547b4a28978887386a
Noticed this while playing with these changes; at this point, the volume _is_ created, but we return `nil` (and an error) because we were unable to set metadata for the volume. Wondering if this is correct (and/or if this would have consequences, because code may _think _the volume wasn't created) Because of the above, I wasn't sure if we should return `true` (volume created) or `false` (volume "not" created) here
thaJeztah
4,972
moby/moby
42,024
Fix grammar in client function comments
<!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** I was looking through the client documentation and found a small grammar mistake in one of the comments, and ended up falling down this rabbit hole. I change certain words and added punctuation to the comments of functions. I didn't get everything, nor is my grammar perfect, and I'd be happy to move things around. **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Grammar fixes in exported client functions **- A picture of a cute animal (not mandatory but encouraged)** ![image](https://user-images.githubusercontent.com/54278938/107890945-c535ba00-6ee9-11eb-9a83-bd4fbb4ec610.png)
null
2021-02-14 22:27:15+00:00
2021-02-16 17:57:12+00:00
client/image_build.go
package client // import "github.com/docker/docker/client" import ( "context" "encoding/base64" "encoding/json" "io" "net/http" "net/url" "strconv" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" ) // ImageBuild sends request to the daemon to build images. // The Body in the response implement an io.ReadCloser and it's up to the caller to // close it. func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(options) if err != nil { return types.ImageBuildResponse{}, err } headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { return types.ImageBuildResponse{}, err } osType := getDockerOS(serverResp.header.Get("Server")) return types.ImageBuildResponse{ Body: serverResp.body, OSType: osType, }, nil } func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, "extrahosts": options.ExtraHosts, } if options.SuppressOutput { query.Set("q", "1") } if options.RemoteContext != "" { query.Set("remote", options.RemoteContext) } if options.NoCache { query.Set("nocache", "1") } if options.Remove { query.Set("rm", "1") } else { query.Set("rm", "0") } if options.ForceRemove { query.Set("forcerm", "1") } if options.PullParent { query.Set("pull", "1") } if options.Squash { if err := cli.NewVersionError("1.25", "squash"); err != nil { return query, err } query.Set("squash", "1") } if !container.Isolation.IsDefault(options.Isolation) { query.Set("isolation", string(options.Isolation)) } 
query.Set("cpusetcpus", options.CPUSetCPUs) query.Set("networkmode", options.NetworkMode) query.Set("cpusetmems", options.CPUSetMems) query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) query.Set("memory", strconv.FormatInt(options.Memory, 10)) query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) query.Set("cgroupparent", options.CgroupParent) query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) query.Set("dockerfile", options.Dockerfile) query.Set("target", options.Target) ulimitsJSON, err := json.Marshal(options.Ulimits) if err != nil { return query, err } query.Set("ulimits", string(ulimitsJSON)) buildArgsJSON, err := json.Marshal(options.BuildArgs) if err != nil { return query, err } query.Set("buildargs", string(buildArgsJSON)) labelsJSON, err := json.Marshal(options.Labels) if err != nil { return query, err } query.Set("labels", string(labelsJSON)) cacheFromJSON, err := json.Marshal(options.CacheFrom) if err != nil { return query, err } query.Set("cachefrom", string(cacheFromJSON)) if options.SessionID != "" { query.Set("session", options.SessionID) } if options.Platform != "" { if err := cli.NewVersionError("1.32", "platform"); err != nil { return query, err } query.Set("platform", strings.ToLower(options.Platform)) } if options.BuildID != "" { query.Set("buildid", options.BuildID) } query.Set("version", string(options.Version)) if options.Outputs != nil { outputsJSON, err := json.Marshal(options.Outputs) if err != nil { return query, err } query.Set("outputs", string(outputsJSON)) } return query, nil }
package client // import "github.com/docker/docker/client" import ( "context" "encoding/base64" "encoding/json" "io" "net/http" "net/url" "strconv" "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" ) // ImageBuild sends a request to the daemon to build images. // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(options) if err != nil { return types.ImageBuildResponse{}, err } headers := http.Header(make(map[string][]string)) buf, err := json.Marshal(options.AuthConfigs) if err != nil { return types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { return types.ImageBuildResponse{}, err } osType := getDockerOS(serverResp.header.Get("Server")) return types.ImageBuildResponse{ Body: serverResp.body, OSType: osType, }, nil } func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, "extrahosts": options.ExtraHosts, } if options.SuppressOutput { query.Set("q", "1") } if options.RemoteContext != "" { query.Set("remote", options.RemoteContext) } if options.NoCache { query.Set("nocache", "1") } if options.Remove { query.Set("rm", "1") } else { query.Set("rm", "0") } if options.ForceRemove { query.Set("forcerm", "1") } if options.PullParent { query.Set("pull", "1") } if options.Squash { if err := cli.NewVersionError("1.25", "squash"); err != nil { return query, err } query.Set("squash", "1") } if !container.Isolation.IsDefault(options.Isolation) { query.Set("isolation", string(options.Isolation)) } 
query.Set("cpusetcpus", options.CPUSetCPUs) query.Set("networkmode", options.NetworkMode) query.Set("cpusetmems", options.CPUSetMems) query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) query.Set("memory", strconv.FormatInt(options.Memory, 10)) query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) query.Set("cgroupparent", options.CgroupParent) query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) query.Set("dockerfile", options.Dockerfile) query.Set("target", options.Target) ulimitsJSON, err := json.Marshal(options.Ulimits) if err != nil { return query, err } query.Set("ulimits", string(ulimitsJSON)) buildArgsJSON, err := json.Marshal(options.BuildArgs) if err != nil { return query, err } query.Set("buildargs", string(buildArgsJSON)) labelsJSON, err := json.Marshal(options.Labels) if err != nil { return query, err } query.Set("labels", string(labelsJSON)) cacheFromJSON, err := json.Marshal(options.CacheFrom) if err != nil { return query, err } query.Set("cachefrom", string(cacheFromJSON)) if options.SessionID != "" { query.Set("session", options.SessionID) } if options.Platform != "" { if err := cli.NewVersionError("1.32", "platform"); err != nil { return query, err } query.Set("platform", strings.ToLower(options.Platform)) } if options.BuildID != "" { query.Set("buildid", options.BuildID) } query.Set("version", string(options.Version)) if options.Outputs != nil { outputsJSON, err := json.Marshal(options.Outputs) if err != nil { return query, err } query.Set("outputs", string(outputsJSON)) } return query, nil }
LeviHarrison
3d966826874e3ecb00f2b49422faff050c61e8f8
646072ed6524f159c214f830f0049369db5a9441
I think in this case, `Body` refers to the `Body` field in the response; https://github.com/moby/moby/blob/46cdcd206c56172b95ba5c77b827a722dab426c5/client/image_build.go#L42-L45
thaJeztah
4,973
moby/moby
42,021
Update rootlesskit to v0.13.1 to fix handling of IPv6 addresses
testing the changes I made in https://github.com/rootless-containers/rootlesskit/pull/213 separate from https://github.com/moby/moby/pull/41908 ## v0.13.1 - Refactor `ParsePortSpec` to handle IPv6 addresses, and improve validation ## v0.13.0 - `rootlesskit --pidns`: fix propagating exit status - Support cgroup2 evacuation, e.g., `systemd-run -p Delegate=yes --user -t rootlesskit --cgroupns --pidns --evacuate-cgroup2=evac --net=slirp4netns bash` ## v0.12.0 - Port forwarding API now supports setting `ChildIP` - The `vendor` directory is no longer included in this repo. Run `go mod vendor` if you need **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-13 12:50:57+00:00
2021-02-18 16:31:41+00:00
hack/dockerfile/install/rootlesskit.installer
#!/bin/sh # v0.11.0 : ${ROOTLESSKIT_COMMIT:=2886253e467c5444a4d2ac7084e53aa3cc50055d} install_rootlesskit() { case "$1" in "dynamic") install_rootlesskit_dynamic return ;; "") export CGO_ENABLED=0 _install_rootlesskit ;; *) echo 'Usage: $0 [dynamic]' ;; esac } install_rootlesskit_dynamic() { export ROOTLESSKIT_LDFLAGS="-linkmode=external" install_rootlesskit export BUILD_MODE=${GO_BUILDMODE} _install_rootlesskit } _install_rootlesskit() { echo "Install rootlesskit version $ROOTLESSKIT_COMMIT" git clone https://github.com/rootless-containers/rootlesskit.git "$GOPATH/src/github.com/rootless-containers/rootlesskit" cd "$GOPATH/src/github.com/rootless-containers/rootlesskit" git checkout -q "$ROOTLESSKIT_COMMIT" for f in rootlesskit rootlesskit-docker-proxy; do go build $BUILD_MODE -ldflags="$ROOTLESSKIT_LDFLAGS" -o "${PREFIX}/$f" github.com/rootless-containers/rootlesskit/cmd/$f done }
#!/bin/sh # v0.13.1 : "${ROOTLESSKIT_COMMIT:=5c30c9c2586add2ad659132990fdc230f05035fa}" install_rootlesskit() { case "$1" in "dynamic") install_rootlesskit_dynamic return ;; "") export CGO_ENABLED=0 _install_rootlesskit ;; *) echo 'Usage: $0 [dynamic]' ;; esac } install_rootlesskit_dynamic() { export ROOTLESSKIT_LDFLAGS="-linkmode=external" install_rootlesskit export BUILD_MODE=${GO_BUILDMODE} _install_rootlesskit } _install_rootlesskit() ( echo "Install rootlesskit version $ROOTLESSKIT_COMMIT" git clone https://github.com/rootless-containers/rootlesskit.git "$GOPATH/src/github.com/rootless-containers/rootlesskit" cd "$GOPATH/src/github.com/rootless-containers/rootlesskit" || exit 1 git checkout -q "$ROOTLESSKIT_COMMIT" export GO111MODULE=on for f in rootlesskit rootlesskit-docker-proxy; do go build $BUILD_MODE -ldflags="$ROOTLESSKIT_LDFLAGS" -o "${PREFIX}/$f" github.com/rootless-containers/rootlesskit/cmd/$f done )
thaJeztah
546c33cedef6283101e8280159647b2b3aa9cc41
dca8ff8f47341141560d73311e6f6bab551d60dd
v0.13.1 is now ready https://github.com/rootless-containers/rootlesskit/releases/tag/v0.13.1
AkihiroSuda
4,974
moby/moby
42,021
Update rootlesskit to v0.13.1 to fix handling of IPv6 addresses
testing the changes I made in https://github.com/rootless-containers/rootlesskit/pull/213 separate from https://github.com/moby/moby/pull/41908 ## v0.13.1 - Refactor `ParsePortSpec` to handle IPv6 addresses, and improve validation ## v0.13.0 - `rootlesskit --pidns`: fix propagating exit status - Support cgroup2 evacuation, e.g., `systemd-run -p Delegate=yes --user -t rootlesskit --cgroupns --pidns --evacuate-cgroup2=evac --net=slirp4netns bash` ## v0.12.0 - Port forwarding API now supports setting `ChildIP` - The `vendor` directory is no longer included in this repo. Run `go mod vendor` if you need **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-13 12:50:57+00:00
2021-02-18 16:31:41+00:00
hack/dockerfile/install/rootlesskit.installer
#!/bin/sh # v0.11.0 : ${ROOTLESSKIT_COMMIT:=2886253e467c5444a4d2ac7084e53aa3cc50055d} install_rootlesskit() { case "$1" in "dynamic") install_rootlesskit_dynamic return ;; "") export CGO_ENABLED=0 _install_rootlesskit ;; *) echo 'Usage: $0 [dynamic]' ;; esac } install_rootlesskit_dynamic() { export ROOTLESSKIT_LDFLAGS="-linkmode=external" install_rootlesskit export BUILD_MODE=${GO_BUILDMODE} _install_rootlesskit } _install_rootlesskit() { echo "Install rootlesskit version $ROOTLESSKIT_COMMIT" git clone https://github.com/rootless-containers/rootlesskit.git "$GOPATH/src/github.com/rootless-containers/rootlesskit" cd "$GOPATH/src/github.com/rootless-containers/rootlesskit" git checkout -q "$ROOTLESSKIT_COMMIT" for f in rootlesskit rootlesskit-docker-proxy; do go build $BUILD_MODE -ldflags="$ROOTLESSKIT_LDFLAGS" -o "${PREFIX}/$f" github.com/rootless-containers/rootlesskit/cmd/$f done }
#!/bin/sh # v0.13.1 : "${ROOTLESSKIT_COMMIT:=5c30c9c2586add2ad659132990fdc230f05035fa}" install_rootlesskit() { case "$1" in "dynamic") install_rootlesskit_dynamic return ;; "") export CGO_ENABLED=0 _install_rootlesskit ;; *) echo 'Usage: $0 [dynamic]' ;; esac } install_rootlesskit_dynamic() { export ROOTLESSKIT_LDFLAGS="-linkmode=external" install_rootlesskit export BUILD_MODE=${GO_BUILDMODE} _install_rootlesskit } _install_rootlesskit() ( echo "Install rootlesskit version $ROOTLESSKIT_COMMIT" git clone https://github.com/rootless-containers/rootlesskit.git "$GOPATH/src/github.com/rootless-containers/rootlesskit" cd "$GOPATH/src/github.com/rootless-containers/rootlesskit" || exit 1 git checkout -q "$ROOTLESSKIT_COMMIT" export GO111MODULE=on for f in rootlesskit rootlesskit-docker-proxy; do go build $BUILD_MODE -ldflags="$ROOTLESSKIT_LDFLAGS" -o "${PREFIX}/$f" github.com/rootless-containers/rootlesskit/cmd/$f done )
thaJeztah
546c33cedef6283101e8280159647b2b3aa9cc41
dca8ff8f47341141560d73311e6f6bab551d60dd
Thanks! Updated
thaJeztah
4,975
moby/moby
42,011
Revert "Temporarily disable CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE
fixes https://github.com/moby/moby/issues/42601 ## Revert "Temporarily disable CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE" Now that runc v1.0.0-rc93 is used, we can revert this temporary workaround This reverts commit a38b96b8cdc4d345a050b417c4c492b75329e5a6 (https://github.com/moby/moby/pull/41563). relates to: - https://github.com/moby/moby/issues/41562 [20.10 beta] Unknown capability "CAP_PERFMON" on Linux 5.8.14 - https://github.com/kubernetes-sigs/kind/issues/2058 [master] kube-proxy doesn't start up due to "apply caps: operation not permitted" error - https://github.com/opencontainers/runtime-spec/issues/1071 **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-11 20:48:38+00:00
2021-08-04 09:15:09+00:00
oci/caps/utils.go
package caps // import "github.com/docker/docker/oci/caps" import ( "fmt" "strings" "github.com/docker/docker/errdefs" "github.com/syndtr/gocapability/capability" ) var capabilityList Capabilities func init() { last := capability.CAP_LAST_CAP // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap if last == capability.Cap(63) { last = capability.CAP_BLOCK_SUSPEND } if last > capability.CAP_AUDIT_READ { // Prevents docker from setting CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE // capabilities on privileged (or CAP_ALL) containers on Kernel 5.8 and up. // While these kernels support these capabilities, the current release of // runc ships with an older version of /gocapability/capability, and does // not know about them, causing an error to be produced. // // FIXME remove once https://github.com/opencontainers/runc/commit/6dfbe9b80707b1ca188255e8def15263348e0f9a // is included in a runc release and once we stop supporting containerd 1.3.x // (which ships with runc v1.0.0-rc92) last = capability.CAP_AUDIT_READ } for _, cap := range capability.List() { if cap > last { continue } capabilityList = append(capabilityList, &CapabilityMapping{ Key: "CAP_" + strings.ToUpper(cap.String()), Value: cap, }, ) } } type ( // CapabilityMapping maps linux capability name to its value of capability.Cap type // Capabilities is one of the security systems in Linux Security Module (LSM) // framework provided by the kernel. 
// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } // Capabilities contains all CapabilityMapping Capabilities []*CapabilityMapping ) // String returns <key> of CapabilityMapping func (c *CapabilityMapping) String() string { return c.Key } // GetAllCapabilities returns all of the capabilities func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } // inSlice tests whether a string is contained in a slice of strings or not. func inSlice(slice []string, s string) bool { for _, ss := range slice { if s == ss { return true } } return false } const allCapabilities = "ALL" // NormalizeLegacyCapabilities normalizes, and validates CapAdd/CapDrop capabilities // by upper-casing them, and adding a CAP_ prefix (if not yet present). // // This function also accepts the "ALL" magic-value, that's used by CapAdd/CapDrop. func NormalizeLegacyCapabilities(caps []string) ([]string, error) { var normalized []string valids := GetAllCapabilities() for _, c := range caps { c = strings.ToUpper(c) if c == allCapabilities { normalized = append(normalized, c) continue } if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } if !inSlice(valids, c) { return nil, errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c)) } normalized = append(normalized, c) } return normalized, nil } // TweakCapabilities tweaks capabilities by adding, dropping, or overriding // capabilities in the basics capabilities list. 
func TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) { switch { case privileged: // Privileged containers get all capabilities return GetAllCapabilities(), nil case len(adds) == 0 && len(drops) == 0: // Nothing to tweak; we're done return basics, nil } capDrop, err := NormalizeLegacyCapabilities(drops) if err != nil { return nil, err } capAdd, err := NormalizeLegacyCapabilities(adds) if err != nil { return nil, err } var caps []string switch { case inSlice(capAdd, allCapabilities): // Add all capabilities except ones on capDrop for _, c := range GetAllCapabilities() { if !inSlice(capDrop, c) { caps = append(caps, c) } } case inSlice(capDrop, allCapabilities): // "Drop" all capabilities; use what's in capAdd instead caps = capAdd default: // First drop some capabilities for _, c := range basics { if !inSlice(capDrop, c) { caps = append(caps, c) } } // Then add the list of capabilities from capAdd caps = append(caps, capAdd...) } return caps, nil }
package caps // import "github.com/docker/docker/oci/caps" import ( "fmt" "strings" "github.com/docker/docker/errdefs" "github.com/syndtr/gocapability/capability" ) var capabilityList Capabilities func init() { last := capability.CAP_LAST_CAP // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap if last == capability.Cap(63) { last = capability.CAP_BLOCK_SUSPEND } for _, cap := range capability.List() { if cap > last { continue } capabilityList = append(capabilityList, &CapabilityMapping{ Key: "CAP_" + strings.ToUpper(cap.String()), Value: cap, }, ) } } type ( // CapabilityMapping maps linux capability name to its value of capability.Cap type // Capabilities is one of the security systems in Linux Security Module (LSM) // framework provided by the kernel. // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } // Capabilities contains all CapabilityMapping Capabilities []*CapabilityMapping ) // String returns <key> of CapabilityMapping func (c *CapabilityMapping) String() string { return c.Key } // GetAllCapabilities returns all of the capabilities func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } // inSlice tests whether a string is contained in a slice of strings or not. func inSlice(slice []string, s string) bool { for _, ss := range slice { if s == ss { return true } } return false } const allCapabilities = "ALL" // NormalizeLegacyCapabilities normalizes, and validates CapAdd/CapDrop capabilities // by upper-casing them, and adding a CAP_ prefix (if not yet present). // // This function also accepts the "ALL" magic-value, that's used by CapAdd/CapDrop. 
func NormalizeLegacyCapabilities(caps []string) ([]string, error) { var normalized []string valids := GetAllCapabilities() for _, c := range caps { c = strings.ToUpper(c) if c == allCapabilities { normalized = append(normalized, c) continue } if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } if !inSlice(valids, c) { return nil, errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c)) } normalized = append(normalized, c) } return normalized, nil } // TweakCapabilities tweaks capabilities by adding, dropping, or overriding // capabilities in the basics capabilities list. func TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) { switch { case privileged: // Privileged containers get all capabilities return GetAllCapabilities(), nil case len(adds) == 0 && len(drops) == 0: // Nothing to tweak; we're done return basics, nil } capDrop, err := NormalizeLegacyCapabilities(drops) if err != nil { return nil, err } capAdd, err := NormalizeLegacyCapabilities(adds) if err != nil { return nil, err } var caps []string switch { case inSlice(capAdd, allCapabilities): // Add all capabilities except ones on capDrop for _, c := range GetAllCapabilities() { if !inSlice(capDrop, c) { caps = append(caps, c) } } case inSlice(capDrop, allCapabilities): // "Drop" all capabilities; use what's in capAdd instead caps = capAdd default: // First drop some capabilities for _, c := range basics { if !inSlice(capDrop, c) { caps = append(caps, c) } } // Then add the list of capabilities from capAdd caps = append(caps, capAdd...) } return caps, nil }
thaJeztah
52af46671691dfb76772cbf6bac0f688e464fb5d
e6a3313f162999e3ef84aacb8b68fee1057debe9
This will break dind (Docker 21 in Docker 20.10). I still think https://github.com/containerd/containerd/pull/5017 is the right approach.
AkihiroSuda
4,976
moby/moby
42,011
Revert "Temporarily disable CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE
fixes https://github.com/moby/moby/issues/42601 ## Revert "Temporarily disable CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE" Now that runc v1.0.0-rc93 is used, we can revert this temporary workaround This reverts commit a38b96b8cdc4d345a050b417c4c492b75329e5a6 (https://github.com/moby/moby/pull/41563). relates to: - https://github.com/moby/moby/issues/41562 [20.10 beta] Unknown capability "CAP_PERFMON" on Linux 5.8.14 - https://github.com/kubernetes-sigs/kind/issues/2058 [master] kube-proxy doesn't start up due to "apply caps: operation not permitted" error - https://github.com/opencontainers/runtime-spec/issues/1071 **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-11 20:48:38+00:00
2021-08-04 09:15:09+00:00
oci/caps/utils.go
package caps // import "github.com/docker/docker/oci/caps" import ( "fmt" "strings" "github.com/docker/docker/errdefs" "github.com/syndtr/gocapability/capability" ) var capabilityList Capabilities func init() { last := capability.CAP_LAST_CAP // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap if last == capability.Cap(63) { last = capability.CAP_BLOCK_SUSPEND } if last > capability.CAP_AUDIT_READ { // Prevents docker from setting CAP_PERFMON, CAP_BPF, and CAP_CHECKPOINT_RESTORE // capabilities on privileged (or CAP_ALL) containers on Kernel 5.8 and up. // While these kernels support these capabilities, the current release of // runc ships with an older version of /gocapability/capability, and does // not know about them, causing an error to be produced. // // FIXME remove once https://github.com/opencontainers/runc/commit/6dfbe9b80707b1ca188255e8def15263348e0f9a // is included in a runc release and once we stop supporting containerd 1.3.x // (which ships with runc v1.0.0-rc92) last = capability.CAP_AUDIT_READ } for _, cap := range capability.List() { if cap > last { continue } capabilityList = append(capabilityList, &CapabilityMapping{ Key: "CAP_" + strings.ToUpper(cap.String()), Value: cap, }, ) } } type ( // CapabilityMapping maps linux capability name to its value of capability.Cap type // Capabilities is one of the security systems in Linux Security Module (LSM) // framework provided by the kernel. 
// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } // Capabilities contains all CapabilityMapping Capabilities []*CapabilityMapping ) // String returns <key> of CapabilityMapping func (c *CapabilityMapping) String() string { return c.Key } // GetAllCapabilities returns all of the capabilities func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } // inSlice tests whether a string is contained in a slice of strings or not. func inSlice(slice []string, s string) bool { for _, ss := range slice { if s == ss { return true } } return false } const allCapabilities = "ALL" // NormalizeLegacyCapabilities normalizes, and validates CapAdd/CapDrop capabilities // by upper-casing them, and adding a CAP_ prefix (if not yet present). // // This function also accepts the "ALL" magic-value, that's used by CapAdd/CapDrop. func NormalizeLegacyCapabilities(caps []string) ([]string, error) { var normalized []string valids := GetAllCapabilities() for _, c := range caps { c = strings.ToUpper(c) if c == allCapabilities { normalized = append(normalized, c) continue } if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } if !inSlice(valids, c) { return nil, errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c)) } normalized = append(normalized, c) } return normalized, nil } // TweakCapabilities tweaks capabilities by adding, dropping, or overriding // capabilities in the basics capabilities list. 
func TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) { switch { case privileged: // Privileged containers get all capabilities return GetAllCapabilities(), nil case len(adds) == 0 && len(drops) == 0: // Nothing to tweak; we're done return basics, nil } capDrop, err := NormalizeLegacyCapabilities(drops) if err != nil { return nil, err } capAdd, err := NormalizeLegacyCapabilities(adds) if err != nil { return nil, err } var caps []string switch { case inSlice(capAdd, allCapabilities): // Add all capabilities except ones on capDrop for _, c := range GetAllCapabilities() { if !inSlice(capDrop, c) { caps = append(caps, c) } } case inSlice(capDrop, allCapabilities): // "Drop" all capabilities; use what's in capAdd instead caps = capAdd default: // First drop some capabilities for _, c := range basics { if !inSlice(capDrop, c) { caps = append(caps, c) } } // Then add the list of capabilities from capAdd caps = append(caps, capAdd...) } return caps, nil }
package caps // import "github.com/docker/docker/oci/caps" import ( "fmt" "strings" "github.com/docker/docker/errdefs" "github.com/syndtr/gocapability/capability" ) var capabilityList Capabilities func init() { last := capability.CAP_LAST_CAP // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap if last == capability.Cap(63) { last = capability.CAP_BLOCK_SUSPEND } for _, cap := range capability.List() { if cap > last { continue } capabilityList = append(capabilityList, &CapabilityMapping{ Key: "CAP_" + strings.ToUpper(cap.String()), Value: cap, }, ) } } type ( // CapabilityMapping maps linux capability name to its value of capability.Cap type // Capabilities is one of the security systems in Linux Security Module (LSM) // framework provided by the kernel. // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html CapabilityMapping struct { Key string `json:"key,omitempty"` Value capability.Cap `json:"value,omitempty"` } // Capabilities contains all CapabilityMapping Capabilities []*CapabilityMapping ) // String returns <key> of CapabilityMapping func (c *CapabilityMapping) String() string { return c.Key } // GetAllCapabilities returns all of the capabilities func GetAllCapabilities() []string { output := make([]string, len(capabilityList)) for i, capability := range capabilityList { output[i] = capability.String() } return output } // inSlice tests whether a string is contained in a slice of strings or not. func inSlice(slice []string, s string) bool { for _, ss := range slice { if s == ss { return true } } return false } const allCapabilities = "ALL" // NormalizeLegacyCapabilities normalizes, and validates CapAdd/CapDrop capabilities // by upper-casing them, and adding a CAP_ prefix (if not yet present). // // This function also accepts the "ALL" magic-value, that's used by CapAdd/CapDrop. 
func NormalizeLegacyCapabilities(caps []string) ([]string, error) { var normalized []string valids := GetAllCapabilities() for _, c := range caps { c = strings.ToUpper(c) if c == allCapabilities { normalized = append(normalized, c) continue } if !strings.HasPrefix(c, "CAP_") { c = "CAP_" + c } if !inSlice(valids, c) { return nil, errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c)) } normalized = append(normalized, c) } return normalized, nil } // TweakCapabilities tweaks capabilities by adding, dropping, or overriding // capabilities in the basics capabilities list. func TweakCapabilities(basics, adds, drops []string, privileged bool) ([]string, error) { switch { case privileged: // Privileged containers get all capabilities return GetAllCapabilities(), nil case len(adds) == 0 && len(drops) == 0: // Nothing to tweak; we're done return basics, nil } capDrop, err := NormalizeLegacyCapabilities(drops) if err != nil { return nil, err } capAdd, err := NormalizeLegacyCapabilities(adds) if err != nil { return nil, err } var caps []string switch { case inSlice(capAdd, allCapabilities): // Add all capabilities except ones on capDrop for _, c := range GetAllCapabilities() { if !inSlice(capDrop, c) { caps = append(caps, c) } } case inSlice(capDrop, allCapabilities): // "Drop" all capabilities; use what's in capAdd instead caps = capAdd default: // First drop some capabilities for _, c := range basics { if !inSlice(capDrop, c) { caps = append(caps, c) } } // Then add the list of capabilities from capAdd caps = append(caps, capAdd...) } return caps, nil }
thaJeztah
52af46671691dfb76772cbf6bac0f688e464fb5d
e6a3313f162999e3ef84aacb8b68fee1057debe9
(Might be fine for runc >= rc94, though, as runc >= rc94 does not raise error on unknown caps https://github.com/opencontainers/runc/pull/2854)
AkihiroSuda
4,977
moby/moby
42,004
Fix userns-remap option when username & UID match
Signed-off-by: Grant Millar <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fixed #42003 by only matching UID or username. **- How I did it** Check `subuidRangesWithUID` first then `subuidRangesWithUserName`. **- How to verify it** 1. `echo "10426:x:10426:10426::/nonexistent:/bin/false" >> /etc/passwd` 2. `echo "10426:x:10426:" >> /etc/group` 3. `echo "10426:10426000:10000" >> /etc/subuid` 4. `echo "10426:10426000:10000" >> /etc/subgid` 5. `dockerd --userns-remap=10426 &` 6. `docker run hello-world` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fix userns-remap option when username & UID match **- A picture of a cute animal (not mandatory but encouraged)** ![06-beaver-attack](https://user-images.githubusercontent.com/3407496/107393024-33e1d480-6af2-11eb-9fdd-ab74acf03985.jpg)
null
2021-02-09 16:17:29+00:00
2021-02-11 19:55:47+00:00
pkg/idtools/idtools_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } uid := strconv.Itoa(usr.Uid) subuidRangesWithUserName, err := parseSubuid(name) if err != nil { return nil, err } subgidRangesWithUserName, err := parseSubgid(name) if err != nil { return nil, err } subuidRangesWithUID, err := parseSubuid(uid) if err != nil { return nil, err } subgidRangesWithUID, err := parseSubgid(uid) if err != nil { return nil, err } subuidRanges := append(subuidRangesWithUserName, subuidRangesWithUID...) subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...) if len(subuidRanges) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", name) } if len(subgidRanges) == 0 { return nil, errors.Errorf("no subgid ranges found for user %q", name) } return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil }
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { return nil, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { return nil, err } return &IdentityMapping{ uids: subuidRanges, gids: subgidRanges, }, nil } func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubuid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil } func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubgid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subgid 
ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil }
Rid
93ab21a193d726ab622f8b66f672774593c059a3
c9bbc68e75f224c4490ff6e3121be50a612edc22
Actually wondering why we're looking up both; perhaps instead we should return early (try lookup sub-uids by UID -> if error, or no results -> lookup by Name -> repeat for sub-gids). That said; ISTR there was some discussion about wether "user-name" or "user-id" takes precedence (user-name should probably not be able to take precedence (i.e., a user _named_ `0` should not be able to "trump" `root`), but linux disagreed on that (some discussion on https://github.com/moby/moby/issues/21436, https://github.com/opencontainers/runc/issues/695, and https://github.com/opencontainers/runc/commit/0a5293fa4ee5b38d1eeaa041754fd01f15960ad9 (https://github.com/opencontainers/runc/pull/708))
thaJeztah
4,978
moby/moby
42,004
Fix userns-remap option when username & UID match
Signed-off-by: Grant Millar <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fixed #42003 by only matching UID or username. **- How I did it** Check `subuidRangesWithUID` first then `subuidRangesWithUserName`. **- How to verify it** 1. `echo "10426:x:10426:10426::/nonexistent:/bin/false" >> /etc/passwd` 2. `echo "10426:x:10426:" >> /etc/group` 3. `echo "10426:10426000:10000" >> /etc/subuid` 4. `echo "10426:10426000:10000" >> /etc/subgid` 5. `dockerd --userns-remap=10426 &` 6. `docker run hello-world` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fix userns-remap option when username & UID match **- A picture of a cute animal (not mandatory but encouraged)** ![06-beaver-attack](https://user-images.githubusercontent.com/3407496/107393024-33e1d480-6af2-11eb-9fdd-ab74acf03985.jpg)
null
2021-02-09 16:17:29+00:00
2021-02-11 19:55:47+00:00
pkg/idtools/idtools_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } uid := strconv.Itoa(usr.Uid) subuidRangesWithUserName, err := parseSubuid(name) if err != nil { return nil, err } subgidRangesWithUserName, err := parseSubgid(name) if err != nil { return nil, err } subuidRangesWithUID, err := parseSubuid(uid) if err != nil { return nil, err } subgidRangesWithUID, err := parseSubgid(uid) if err != nil { return nil, err } subuidRanges := append(subuidRangesWithUserName, subuidRangesWithUID...) subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...) if len(subuidRanges) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", name) } if len(subgidRanges) == 0 { return nil, errors.Errorf("no subgid ranges found for user %q", name) } return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil }
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { return nil, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { return nil, err } return &IdentityMapping{ uids: subuidRanges, gids: subgidRanges, }, nil } func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubuid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil } func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubgid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subgid 
ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil }
Rid
93ab21a193d726ab622f8b66f672774593c059a3
c9bbc68e75f224c4490ff6e3121be50a612edc22
Did a quick try at what it _could_ look like (feedback welcome) earlier; ```go // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { return nil, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { return nil, err } return &IdentityMapping{ uids: subuidRanges, gids: subgidRanges, }, nil } func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { r, err := parseSubuid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(r) == 0 { r, err = parseSubuid(usr.Name) if err != nil { return nil, err } } if len(r) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) } return createIDMap(r), nil } func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { r, err := parseSubgid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(r) == 0 { r, err = parseSubgid(usr.Name) if err != nil { return nil, err } } if len(r) == 0 { return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name) } return createIDMap(r), nil } ```
thaJeztah
4,979
moby/moby
42,004
Fix userns-remap option when username & UID match
Signed-off-by: Grant Millar <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fixed #42003 by only matching UID or username. **- How I did it** Check `subuidRangesWithUID` first then `subuidRangesWithUserName`. **- How to verify it** 1. `echo "10426:x:10426:10426::/nonexistent:/bin/false" >> /etc/passwd` 2. `echo "10426:x:10426:" >> /etc/group` 3. `echo "10426:10426000:10000" >> /etc/subuid` 4. `echo "10426:10426000:10000" >> /etc/subgid` 5. `dockerd --userns-remap=10426 &` 6. `docker run hello-world` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fix userns-remap option when username & UID match **- A picture of a cute animal (not mandatory but encouraged)** ![06-beaver-attack](https://user-images.githubusercontent.com/3407496/107393024-33e1d480-6af2-11eb-9fdd-ab74acf03985.jpg)
null
2021-02-09 16:17:29+00:00
2021-02-11 19:55:47+00:00
pkg/idtools/idtools_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } uid := strconv.Itoa(usr.Uid) subuidRangesWithUserName, err := parseSubuid(name) if err != nil { return nil, err } subgidRangesWithUserName, err := parseSubgid(name) if err != nil { return nil, err } subuidRangesWithUID, err := parseSubuid(uid) if err != nil { return nil, err } subgidRangesWithUID, err := parseSubgid(uid) if err != nil { return nil, err } subuidRanges := append(subuidRangesWithUserName, subuidRangesWithUID...) subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...) if len(subuidRanges) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", name) } if len(subgidRanges) == 0 { return nil, errors.Errorf("no subgid ranges found for user %q", name) } return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil }
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { return nil, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { return nil, err } return &IdentityMapping{ uids: subuidRanges, gids: subgidRanges, }, nil } func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubuid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil } func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubgid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subgid 
ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil }
Rid
93ab21a193d726ab622f8b66f672774593c059a3
c9bbc68e75f224c4490ff6e3121be50a612edc22
I think it makes sense for UIDs to take precedence, as 99% of the time a numerical value is going to be a UID rather than a username. I was thinking originally to implement separate functions, I wasn't sure if it was too much boilerplate but semantically it looks better to me. Do you want me to update the commit with your implementation?
Rid
4,980
moby/moby
42,004
Fix userns-remap option when username & UID match
Signed-off-by: Grant Millar <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Fixed #42003 by only matching UID or username. **- How I did it** Check `subuidRangesWithUID` first then `subuidRangesWithUserName`. **- How to verify it** 1. `echo "10426:x:10426:10426::/nonexistent:/bin/false" >> /etc/passwd` 2. `echo "10426:x:10426:" >> /etc/group` 3. `echo "10426:10426000:10000" >> /etc/subuid` 4. `echo "10426:10426000:10000" >> /etc/subgid` 5. `dockerd --userns-remap=10426 &` 6. `docker run hello-world` **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> Fix userns-remap option when username & UID match **- A picture of a cute animal (not mandatory but encouraged)** ![06-beaver-attack](https://user-images.githubusercontent.com/3407496/107393024-33e1d480-6af2-11eb-9fdd-ab74acf03985.jpg)
null
2021-02-09 16:17:29+00:00
2021-02-11 19:55:47+00:00
pkg/idtools/idtools_unix.go
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } uid := strconv.Itoa(usr.Uid) subuidRangesWithUserName, err := parseSubuid(name) if err != nil { return nil, err } subgidRangesWithUserName, err := parseSubgid(name) if err != nil { return nil, err } subuidRangesWithUID, err := parseSubuid(uid) if err != nil { return nil, err } subgidRangesWithUID, err := parseSubgid(uid) if err != nil { return nil, err } subuidRanges := append(subuidRangesWithUserName, subuidRangesWithUID...) subgidRanges := append(subgidRangesWithUserName, subgidRangesWithUID...) if len(subuidRanges) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", name) } if len(subgidRanges) == 0 { return nil, errors.Errorf("no subgid ranges found for user %q", name) } return &IdentityMapping{ uids: createIDMap(subuidRanges), gids: createIDMap(subgidRanges), }, nil }
// +build !windows package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" "fmt" "io" "os" "path/filepath" "strconv" "sync" "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" ) var ( entOnce sync.Once getentCmd string ) func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. If chownExisting is false, we won't // chown the full directory path if it exists var paths []string stat, err := system.Stat(path) if err == nil { if !stat.IsDir() { return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } if !chownExisting { return nil } // short-circuit--we were called with an existing directory and chown was requested return setPermissions(path, mode, owner.UID, owner.GID, stat) } if os.IsNotExist(err) { paths = []string{path} } if mkAll { // walk back to "/" looking for directories which do not exist // and add them to the paths array for chown after creation dirPath := path for { dirPath = filepath.Dir(dirPath) if dirPath == "/" { break } if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { paths = append(paths, dirPath) } } if err := system.MkdirAll(path, mode); err != nil { return err } } else { if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { return err } } // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { return err } } return nil } // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory 
func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() return accessible(statInfo.UID() == uint32(pair.UID), statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { if isOwner && (perms&0100 == 0100) { return true } if isGroup && (perms&0010 == 0010) { return true } if perms&0001 == 0001 { return true } return false } // LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUser(name string) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUser(name) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs usr, err = getentUser(name) if err != nil { return user.User{}, err } return usr, nil } // LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupUID(uid int) (user.User, error) { // first try a local system files lookup using existing capabilities usr, err := user.LookupUid(uid) if err == nil { return usr, nil } // local files lookup failed; attempt to call `getent` to query configured passwd dbs return getentUser(strconv.Itoa(uid)) } func getentUser(name string) (user.User, error) { reader, err := callGetent("passwd", name) if err != nil { return user.User{}, err } users, err := user.ParsePasswd(reader) if err != nil { return user.User{}, err } if len(users) == 0 { return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) } return users[0], nil } // LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, // 
followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGroup(name string) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGroup(name) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(name) } // LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, // followed by a call to `getent` for supporting host configured non-files passwd and group dbs func LookupGID(gid int) (user.Group, error) { // first try a local system files lookup using existing capabilities group, err := user.LookupGid(gid) if err == nil { return group, nil } // local files lookup failed; attempt to call `getent` to query configured group dbs return getentGroup(strconv.Itoa(gid)) } func getentGroup(name string) (user.Group, error) { reader, err := callGetent("group", name) if err != nil { return user.Group{}, err } groups, err := user.ParseGroup(reader) if err != nil { return user.Group{}, err } if len(groups) == 0 { return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) } return groups[0], nil } func callGetent(database, key string) (io.Reader, error) { entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) // if no `getent` command on host, can't do anything else if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } out, err := execCmd(getentCmd, database, key) if err != nil { exitCode, errC := system.GetExitCode(err) if errC != nil { return nil, err } switch exitCode { case 1: return nil, fmt.Errorf("getent reported invalid parameters/database unknown") case 2: return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) case 3: return nil, fmt.Errorf("getent database doesn't support enumeration") default: return nil, err } } return bytes.NewReader(out), nil } // 
setPermissions performs a chown/chmod only if the uid/gid don't match what's requested // Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the // dir is on an NFS share, so don't call chown unless we absolutely must. // Likewise for setting permissions. func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { if stat == nil { var err error stat, err = system.Stat(p) if err != nil { return err } } if os.FileMode(stat.Mode()).Perm() != mode.Perm() { if err := os.Chmod(p, mode.Perm()); err != nil { return err } } if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { return nil } return os.Chown(p, uid, gid) } // NewIdentityMapping takes a requested username and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair func NewIdentityMapping(name string) (*IdentityMapping, error) { usr, err := LookupUser(name) if err != nil { return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) } subuidRanges, err := lookupSubUIDRanges(usr) if err != nil { return nil, err } subgidRanges, err := lookupSubGIDRanges(usr) if err != nil { return nil, err } return &IdentityMapping{ uids: subuidRanges, gids: subgidRanges, }, nil } func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubuid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil } func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) if err != nil { return nil, err } if len(rangeList) == 0 { rangeList, err = parseSubgid(usr.Name) if err != nil { return nil, err } } if len(rangeList) == 0 { return nil, errors.Errorf("no subgid 
ranges found for user %q", usr.Name) } return createIDMap(rangeList), nil }
Rid
93ab21a193d726ab622f8b66f672774593c059a3
c9bbc68e75f224c4490ff6e3121be50a612edc22
Yes, there's definitely some boilerplating; I initially kept it inline, and, well, it was still boilerplating, so I extracted it to separate functions. Definitely feel free to copy my code (check if I didn't make a mistake, as it was a quick write up 😅)
thaJeztah
4,981
moby/moby
41,999
Fix for lack of synchronization in daemon/update.go
fixes https://github.com/moby/moby/issues/41988 Signed-off-by: dmytro.iakovliev <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Reordered operations to fix race. **- How I did it** The container state checking has been placed between container.Lock() and container.Unlock(). Added container state checking into the defer handler. **- How to verify it** See script to reproduce failing conditions in https://github.com/moby/moby/issues/41988. **- Description for the changelog** Fix for lack of syncronization in daemon/update.go. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
null
2021-02-08 13:54:52+00:00
2021-04-08 22:27:43+00:00
daemon/update.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) ctr.Unlock() } }() if ctr.RemovalInProgress || ctr.Dead { return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } ctr.Lock() if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. 
// If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() if !ctr.RemovalInProgress && !ctr.Dead { ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) } ctr.Unlock() } }() ctr.Lock() if ctr.RemovalInProgress || ctr.Dead { ctr.Unlock() return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // 
resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
diakovliev
b9f127276fe96957ffa98ba20bc5062ae8678723
be95eae6d2d8d46860f305d417b868a8ac5fc151
Should this use `restoreConfig` instead? (looks like that variable is for that purpose) ```suggestion if restoreConfig { ```
thaJeztah
4,982
moby/moby
41,999
Fix for lack of synchronization in daemon/update.go
fixes https://github.com/moby/moby/issues/41988 Signed-off-by: dmytro.iakovliev <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Reordered operations to fix race. **- How I did it** The container state checking has been placed between container.Lock() and container.Unlock(). Added container state checking into the defer handler. **- How to verify it** See script to reproduce failing conditions in https://github.com/moby/moby/issues/41988. **- Description for the changelog** Fix for lack of syncronization in daemon/update.go. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
null
2021-02-08 13:54:52+00:00
2021-04-08 22:27:43+00:00
daemon/update.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) ctr.Unlock() } }() if ctr.RemovalInProgress || ctr.Dead { return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } ctr.Lock() if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. 
// If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() if !ctr.RemovalInProgress && !ctr.Dead { ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) } ctr.Unlock() } }() ctr.Lock() if ctr.RemovalInProgress || ctr.Dead { ctr.Unlock() return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // 
resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
diakovliev
b9f127276fe96957ffa98ba20bc5062ae8678723
be95eae6d2d8d46860f305d417b868a8ac5fc151
Why was this moved?
thaJeztah
4,983
moby/moby
41,999
Fix for lack of synchronization in daemon/update.go
fixes https://github.com/moby/moby/issues/41988 Signed-off-by: dmytro.iakovliev <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Reordered operations to fix race. **- How I did it** The container state checking has been placed between container.Lock() and container.Unlock(). Added container state checking into the defer handler. **- How to verify it** See script to reproduce failing conditions in https://github.com/moby/moby/issues/41988. **- Description for the changelog** Fix for lack of syncronization in daemon/update.go. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
null
2021-02-08 13:54:52+00:00
2021-04-08 22:27:43+00:00
daemon/update.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) ctr.Unlock() } }() if ctr.RemovalInProgress || ctr.Dead { return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } ctr.Lock() if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. 
// If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() if !ctr.RemovalInProgress && !ctr.Dead { ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) } ctr.Unlock() } }() ctr.Lock() if ctr.RemovalInProgress || ctr.Dead { ctr.Unlock() return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // 
resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
diakovliev
b9f127276fe96957ffa98ba20bc5062ae8678723
be95eae6d2d8d46860f305d417b868a8ac5fc151
This change can be dropped. It's just my style — I make variable declarations as near as possible to their first usage.
diakovliev
4,984
moby/moby
41,999
Fix for lack of synchronization in daemon/update.go
fixes https://github.com/moby/moby/issues/41988 Signed-off-by: dmytro.iakovliev <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Reordered operations to fix race. **- How I did it** The container state checking has been placed between container.Lock() and container.Unlock(). Added container state checking into the defer handler. **- How to verify it** See script to reproduce failing conditions in https://github.com/moby/moby/issues/41988. **- Description for the changelog** Fix for lack of syncronization in daemon/update.go. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
null
2021-02-08 13:54:52+00:00
2021-04-08 22:27:43+00:00
daemon/update.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) ctr.Unlock() } }() if ctr.RemovalInProgress || ctr.Dead { return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } ctr.Lock() if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. 
// If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() if !ctr.RemovalInProgress && !ctr.Dead { ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) } ctr.Unlock() } }() ctr.Lock() if ctr.RemovalInProgress || ctr.Dead { ctr.Unlock() return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // 
resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
diakovliev
b9f127276fe96957ffa98ba20bc5062ae8678723
be95eae6d2d8d46860f305d417b868a8ac5fc151
No, at least because restoreConfig is set without the container lock at line https://github.com/moby/moby/blob/1e1b8bf88ab03bb7dbdfb9b44039b1ba5c0090e1/daemon/update.go#L88. As a result, it is possible that restoreConfig will be true when the container is dead and/or container removal has been activated.
diakovliev
4,985
moby/moby
41,999
Fix for lack of synchronization in daemon/update.go
fixes https://github.com/moby/moby/issues/41988 Signed-off-by: dmytro.iakovliev <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** Reordered operations to fix race. **- How I did it** The container state checking has been placed between container.Lock() and container.Unlock(). Added container state checking into the defer handler. **- How to verify it** See script to reproduce failing conditions in https://github.com/moby/moby/issues/41988. **- Description for the changelog** Fix for lack of syncronization in daemon/update.go. <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: -->
null
2021-02-08 13:54:52+00:00
2021-04-08 22:27:43+00:00
daemon/update.go
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) ctr.Unlock() } }() if ctr.RemovalInProgress || ctr.Dead { return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } ctr.Lock() if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // resources will be updated when the container is started again. 
// If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" "github.com/docker/docker/api/types/container" "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // ContainerUpdate updates configuration of the container func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { var warnings []string c, err := daemon.GetContainer(name) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) if err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) } if err := daemon.update(name, hostConfig); err != nil { return container.ContainerUpdateOKBody{Warnings: warnings}, err } return container.ContainerUpdateOKBody{Warnings: warnings}, nil } func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { if hostConfig == nil { return nil } ctr, err := daemon.GetContainer(name) if err != nil { return err } restoreConfig := false backupHostConfig := *ctr.HostConfig defer func() { if restoreConfig { ctr.Lock() if !ctr.RemovalInProgress && !ctr.Dead { ctr.HostConfig = &backupHostConfig ctr.CheckpointTo(daemon.containersReplica) } ctr.Unlock() } }() ctr.Lock() if ctr.RemovalInProgress || ctr.Dead { ctr.Unlock() return errCannotUpdate(ctr.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) } if err := ctr.UpdateContainer(hostConfig); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } if err := ctr.CheckpointTo(daemon.containersReplica); err != nil { restoreConfig = true ctr.Unlock() return errCannotUpdate(ctr.ID, err) } ctr.Unlock() // if Restart Policy changed, we need to update container monitor if hostConfig.RestartPolicy.Name != "" { ctr.UpdateMonitor(hostConfig.RestartPolicy) } // If container is not running, update hostConfig struct is enough, // 
resources will be updated when the container is started again. // If container is running (including paused), we need to update configs // to the real world. if ctr.IsRunning() && !ctr.IsRestarting() { if err := daemon.containerd.UpdateResources(context.Background(), ctr.ID, toContainerdResources(hostConfig.Resources)); err != nil { restoreConfig = true // TODO: it would be nice if containerd responded with better errors here so we can classify this better. return errCannotUpdate(ctr.ID, errdefs.System(err)) } } daemon.LogContainerEvent(ctr, "update") return nil } func errCannotUpdate(containerID string, err error) error { return errors.Wrap(err, "Cannot update container "+containerID) }
diakovliev
b9f127276fe96957ffa98ba20bc5062ae8678723
be95eae6d2d8d46860f305d417b868a8ac5fc151
oh 🤦 I see — this whole block is already inside an `if restoreConfig {` block. Ignore me
thaJeztah
4,986
moby/moby
41,995
Dockerfile.simple: Fix compile docker binary error with btrfs
Use the image build from Dockerfile.simple to build docker binary failed with not find <brtfs/ioctl.h>, we need to install libbtrfs-dev to fix this. ``` Building: bundles/dynbinary-daemon/dockerd-dev GOOS="" GOARCH="" GOARM="" .gopath/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go:8:10: fatal error: btrfs/ioctl.h: No such file or directory #include <btrfs/ioctl.h> ``` Signed-off-by: Lei Jitang <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** **- How I did it** **- How to verify it** **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-06 14:23:04+00:00
2021-02-06 17:33:48+00:00
Dockerfile.simple
# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration # This represents the bare minimum required to build and test Docker. ARG GO_VERSION=1.13.15 FROM golang:${GO_VERSION}-buster ENV GO111MODULE=off # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list # Compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ build-essential \ curl \ cmake \ gcc \ git \ libapparmor-dev \ libdevmapper-dev \ libseccomp-dev \ ca-certificates \ e2fsprogs \ iptables \ pkg-config \ pigz \ procps \ xfsprogs \ xz-utils \ \ aufs-tools \ vim-common \ && rm -rf /var/lib/apt/lists/* # Install runc, containerd, tini and docker-proxy # Please edit hack/dockerfile/install/<name>.installer to update them. COPY hack/dockerfile/install hack/dockerfile/install RUN for i in runc containerd tini proxy dockercli; \ do hack/dockerfile/install/install.sh $i; \ done ENV PATH=/usr/local/cli:$PATH ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . /usr/src/docker
# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration # This represents the bare minimum required to build and test Docker. ARG GO_VERSION=1.13.15 FROM golang:${GO_VERSION}-buster ENV GO111MODULE=off # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list # Compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ curl \ cmake \ gcc \ git \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ ca-certificates \ e2fsprogs \ iptables \ pkg-config \ pigz \ procps \ xfsprogs \ xz-utils \ \ aufs-tools \ vim-common \ && rm -rf /var/lib/apt/lists/* # Install runc, containerd, tini and docker-proxy # Please edit hack/dockerfile/install/<name>.installer to update them. COPY hack/dockerfile/install hack/dockerfile/install RUN for i in runc containerd tini proxy dockercli; \ do hack/dockerfile/install/install.sh $i; \ done ENV PATH=/usr/local/cli:$PATH ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . /usr/src/docker
coolljt0725
0af8ed47bbf51f796ab22797334f69fda10840e5
791640417b67036bbc7d13597cad55bb5fcead2b
This is the package that used to contain this header, so we should be able to remove it from the list now (also, this list is in sorted order). 👍
tianon
4,987
moby/moby
41,995
Dockerfile.simple: Fix compile docker binary error with btrfs
Use the image build from Dockerfile.simple to build docker binary failed with not find <brtfs/ioctl.h>, we need to install libbtrfs-dev to fix this. ``` Building: bundles/dynbinary-daemon/dockerd-dev GOOS="" GOARCH="" GOARM="" .gopath/src/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go:8:10: fatal error: btrfs/ioctl.h: No such file or directory #include <btrfs/ioctl.h> ``` Signed-off-by: Lei Jitang <[email protected]> <!-- Please make sure you've read and understood our contributing guidelines; https://github.com/moby/moby/blob/master/CONTRIBUTING.md ** Make sure all your commits include a signature generated with `git commit -s` ** For additional information on our contributing process, read our contributing guide https://docs.docker.com/opensource/code/ If this is a bug fix, make sure your description includes "fixes #xxxx", or "closes #xxxx" Please provide the following information: --> **- What I did** **- How I did it** **- How to verify it** **- Description for the changelog** <!-- Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> **- A picture of a cute animal (not mandatory but encouraged)**
null
2021-02-06 14:23:04+00:00
2021-02-06 17:33:48+00:00
Dockerfile.simple
# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration # This represents the bare minimum required to build and test Docker. ARG GO_VERSION=1.13.15 FROM golang:${GO_VERSION}-buster ENV GO111MODULE=off # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list # Compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ build-essential \ curl \ cmake \ gcc \ git \ libapparmor-dev \ libdevmapper-dev \ libseccomp-dev \ ca-certificates \ e2fsprogs \ iptables \ pkg-config \ pigz \ procps \ xfsprogs \ xz-utils \ \ aufs-tools \ vim-common \ && rm -rf /var/lib/apt/lists/* # Install runc, containerd, tini and docker-proxy # Please edit hack/dockerfile/install/<name>.installer to update them. COPY hack/dockerfile/install hack/dockerfile/install RUN for i in runc containerd tini proxy dockercli; \ do hack/dockerfile/install/install.sh $i; \ done ENV PATH=/usr/local/cli:$PATH ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . /usr/src/docker
# docker build -t docker:simple -f Dockerfile.simple . # docker run --rm docker:simple hack/make.sh dynbinary # docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit # docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration # This represents the bare minimum required to build and test Docker. ARG GO_VERSION=1.13.15 FROM golang:${GO_VERSION}-buster ENV GO111MODULE=off # allow replacing httpredir or deb mirror ARG APT_MIRROR=deb.debian.org RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list # Compile and runtime deps # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies # https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ curl \ cmake \ gcc \ git \ libapparmor-dev \ libbtrfs-dev \ libdevmapper-dev \ libseccomp-dev \ ca-certificates \ e2fsprogs \ iptables \ pkg-config \ pigz \ procps \ xfsprogs \ xz-utils \ \ aufs-tools \ vim-common \ && rm -rf /var/lib/apt/lists/* # Install runc, containerd, tini and docker-proxy # Please edit hack/dockerfile/install/<name>.installer to update them. COPY hack/dockerfile/install hack/dockerfile/install RUN for i in runc containerd tini proxy dockercli; \ do hack/dockerfile/install/install.sh $i; \ done ENV PATH=/usr/local/cli:$PATH ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker COPY . /usr/src/docker
coolljt0725
0af8ed47bbf51f796ab22797334f69fda10840e5
791640417b67036bbc7d13597cad55bb5fcead2b
@tianon Thanks, updated
coolljt0725
4,988
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/backend.go
package volume // import "github.com/docker/docker/api/server/router/volume" import ( "context" "github.com/docker/docker/volume/service/opts" // TODO return types need to be refactored into pkg "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" ) // Backend is the methods that need to be implemented to provide // volume specific functionality type Backend interface { List(ctx context.Context, filter filters.Args) ([]*volume.Volume, []string, error) Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error) Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) }
package volume // import "github.com/docker/docker/api/server/router/volume" import ( "context" "github.com/docker/docker/volume/service/opts" // TODO return types need to be refactored into pkg "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" ) // Backend is the methods that need to be implemented to provide // volume specific functionality type Backend interface { List(ctx context.Context, filter filters.Args) ([]*volume.Volume, []string, error) Get(ctx context.Context, name string, opts ...opts.GetOption) (*volume.Volume, error) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*volume.Volume, error) Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) } // ClusterBackend is the backend used for Swarm Cluster Volumes. Regular // volumes go through the volume service, but to avoid across-dependency // between the cluster package and the volume package, we simply provide two // backends here. type ClusterBackend interface { GetVolume(nameOrID string) (volume.Volume, error) GetVolumes(options volume.ListOptions) ([]*volume.Volume, error) CreateVolume(volume volume.CreateOptions) (*volume.Volume, error) RemoveVolume(nameOrID string, force bool) error UpdateVolume(nameOrID string, version uint64, volume volume.UpdateOptions) error IsManager() bool }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
In a follow-up, perhaps we should look if we can unify the interface with the (non-cluster) `Backend` above; there appears to be an overlap (but slightly different signatures); if we unify both, possibly `ClusterBackend` could be `Backend` _plus_ `<cluster-specific methods>`, like; ```go type ClusterBackend interface { Backend Update(nameOrID string, version uint64, volume volume.UpdateOptions) error IsManager() bool } ``` Not a show-stopper though; the `ClusterBackend` is considered an _internal_ interface (only to decouple internal code from the API), so we should be able to make (breaking) changes in future.
thaJeztah
4,989
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
I know we made that mistake for some other endpoints, but wondering if we can make this `"/volumes/{name:.*}"` (so without the trailing `/update`). The endpoint would then be `POST`, or if we want to do it "correct"; `PUT` (if we require the whole definition to be sent) or `PATCH` (if we accept "partial" definitions). Removing the `/update` removes potential confusing URLs, for example, a volume named `foo/update` would otherwise be accessed as `/volumes/foo/update/update`
thaJeztah
4,990
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
I won't argue this at all, pleased to do it the right way for volumes.
dperny
4,991
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
as an uninvolved busybody ... this still looks like "create" and "prune" will be invalid volume names?
chrisbecke
4,992
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
Arf, you're right; we can't change specifically due to those existing, so we must have the suffix 🤦‍♂️ My bad; didn't think it through properly
thaJeztah
4,993
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
Or, well.... looks like we already passed that point, as the DELETE endpoint below has the same problem, so I guess it wouldn't put us in a worse spot than we already are
thaJeztah
4,994
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
the routes are: `GET /volumes` `GET /volumes/{name:.*}` `POST /volumes/create` `POST /volumes/prune` `POST /volumes/{name:.*}/update` `DELETE /volumes/{name:.*}` Replace `POST /volumes/{name:.*}/update` with `PUT /volumes/{name:.*}` for the update operation as per your initial suggestion is actually the zero conflict option
chrisbecke
4,995
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume.go
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend) router.Router { r := &volumeRouter{ backend: b, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
package volume // import "github.com/docker/docker/api/server/router/volume" import "github.com/docker/docker/api/server/router" // volumeRouter is a router to talk with the volumes controller type volumeRouter struct { backend Backend cluster ClusterBackend routes []router.Route } // NewRouter initializes a new volume router func NewRouter(b Backend, cb ClusterBackend) router.Router { r := &volumeRouter{ backend: b, cluster: cb, } r.initRoutes() return r } // Routes returns the available routes to the volumes controller func (r *volumeRouter) Routes() []router.Route { return r.routes } func (r *volumeRouter) initRoutes() { r.routes = []router.Route{ // GET router.NewGetRoute("/volumes", r.getVolumesList), router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), // POST router.NewPostRoute("/volumes/create", r.postVolumesCreate), router.NewPostRoute("/volumes/prune", r.postVolumesPrune), // PUT router.NewPutRoute("/volumes/{name:.*}", r.putVolumesUpdate), // DELETE router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), } }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
Yes, `PUT` (or `PATCH`) works for me
thaJeztah
4,996
moby/moby
41,982
Add Swarm Cluster Volume support
This is a preliminary, draft pull request for Swarm Cluster Volume Support. Though it is a long way off from being ready for merging, because of the size and scope of it, it is best to start review now. This is more-or-less an implementation of the proposal in #39624. Please give special attention to the API changes. I welcome any alternative suggestions for how to structure the API. Closes #31923 Closes #39624
null
2021-02-04 18:06:01+00:00
2022-05-13 00:30:45+00:00
api/server/router/volume/volume_routes.go
package volume // import "github.com/docker/docker/api/server/router/volume" import ( "context" "net/http" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/volume" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" ) func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filters, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return errors.Wrap(err, "error reading volume filters") } volumes, warnings, err := v.backend.List(ctx, filters) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, &volume.ListResponse{Volumes: volumes, Warnings: warnings}) } func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } vol, err := v.backend.Get(ctx, vars["name"], opts.WithGetResolveStatus) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, vol) } func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } var req volume.CreateOptions if err := httputils.ReadJSON(r, &req); err != nil { return err } vol, err := v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels)) if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, vol) } func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } force := httputils.BoolValue(r, "force") if err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force)); err != nil { 
return err } w.WriteHeader(http.StatusNoContent) return nil } func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } pruneReport, err := v.backend.Prune(ctx, pruneFilters) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, pruneReport) }
package volume // import "github.com/docker/docker/api/server/router/volume" import ( "context" "fmt" "net/http" "strconv" "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/api/types/volume" "github.com/docker/docker/errdefs" "github.com/docker/docker/volume/service/opts" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( // clusterVolumesVersion defines the API version that swarm cluster volume // functionality was introduced. avoids the use of magic numbers. clusterVolumesVersion = "1.42" ) func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filters, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return errors.Wrap(err, "error reading volume filters") } volumes, warnings, err := v.backend.List(ctx, filters) if err != nil { return err } version := httputils.VersionFromContext(ctx) if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() { clusterVolumes, swarmErr := v.cluster.GetVolumes(volume.ListOptions{Filters: filters}) if swarmErr != nil { // if there is a swarm error, we may not want to error out right // away. the local list probably worked. instead, let's do what we // do if there's a bad driver while trying to list: add the error // to the warnings. don't do this if swarm is not initialized. warnings = append(warnings, swarmErr.Error()) } // add the cluster volumes to the return volumes = append(volumes, clusterVolumes...) 
} return httputils.WriteJSON(w, http.StatusOK, &volume.ListResponse{Volumes: volumes, Warnings: warnings}) } func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } version := httputils.VersionFromContext(ctx) // re: volume name duplication // // we prefer to get volumes locally before attempting to get them from the // cluster. Local volumes can only be looked up by name, but cluster // volumes can also be looked up by ID. vol, err := v.backend.Get(ctx, vars["name"], opts.WithGetResolveStatus) // if the volume is not found in the regular volume backend, and the client // is using an API version greater than 1.42 (when cluster volumes were // introduced), then check if Swarm has the volume. if errdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() { swarmVol, err := v.cluster.GetVolume(vars["name"]) // if swarm returns an error and that error indicates that swarm is not // initialized, return original NotFound error. Otherwise, we'd return // a weird swarm unavailable error on non-swarm engines. if err != nil { return err } vol = &swarmVol } else if err != nil { // otherwise, if this isn't NotFound, or this isn't a high enough version, // just return the error by itself. return err } return httputils.WriteJSON(w, http.StatusOK, vol) } func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } var req volume.CreateOptions if err := httputils.ReadJSON(r, &req); err != nil { return err } var ( vol *volume.Volume err error version = httputils.VersionFromContext(ctx) ) // if the ClusterVolumeSpec is filled in, then this is a cluster volume // and is created through the swarm cluster volume backend. 
// // re: volume name duplication // // As it happens, there is no good way to prevent duplication of a volume // name between local and cluster volumes. This is because Swarm volumes // can be created from any manager node, bypassing most of the protections // we could put into the engine side. // // Instead, we will allow creating a volume with a duplicate name, which // should not break anything. if req.ClusterVolumeSpec != nil && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) { logrus.Debug("using cluster volume") vol, err = v.cluster.CreateVolume(req) } else { logrus.Debug("using regular volume") vol, err = v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels)) } if err != nil { return err } return httputils.WriteJSON(w, http.StatusCreated, vol) } func (v *volumeRouter) putVolumesUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if !v.cluster.IsManager() { return errdefs.Unavailable(errors.New("volume update only valid for cluster volumes, but swarm is unavailable")) } if err := httputils.ParseForm(r); err != nil { return err } rawVersion := r.URL.Query().Get("version") version, err := strconv.ParseUint(rawVersion, 10, 64) if err != nil { err = fmt.Errorf("invalid swarm object version '%s': %v", rawVersion, err) return errdefs.InvalidParameter(err) } var req volume.UpdateOptions if err := httputils.ReadJSON(r, &req); err != nil { return err } return v.cluster.UpdateVolume(vars["name"], version, req) } func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } force := httputils.BoolValue(r, "force") version := httputils.VersionFromContext(ctx) err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force)) if err != nil { if errdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, 
clusterVolumesVersion) && v.cluster.IsManager() { err := v.cluster.RemoveVolume(vars["name"], force) if err != nil { return err } } else { return err } } w.WriteHeader(http.StatusNoContent) return nil } func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) if err != nil { return err } pruneReport, err := v.backend.Prune(ctx, pruneFilters) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, pruneReport) }
dperny
3fb59282337a282d31aa11c9e273e3349c16a709
d35731fa15ed8ea96d4f3b6f5358d9385b1b7a46
Given that `v.cluster` is actually the whole cluster backend, one option could be to add `IsManager() bool` to the `volume.ClusterBackend` interface. That way we can check here if the current node is a Swarm manager, and otherwise skip this block; that way, we don't have to use the (rather hacky) `isNoSwarmErr()` handling here. ```suggestion version := httputils.VersionFromContext(ctx) if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() { clusterVolumes, err := v.cluster.GetVolumes(types.VolumeListOptions{Filters: filters}) if err != nil { // if there is a swarm error, we may not want to error out right // away. the local list probably worked. warnings = append(warnings, err.Error()) } volumes = append(volumes, clusterVolumes...) } ```
thaJeztah
4,997