Dataset columns:
repo_name: string (8-38 chars)
pr_number: int64 (3-47.1k)
pr_title: string (8-175 chars)
pr_description: string (2-19.8k chars, nullable)
author: null
date_created: string (25 chars)
date_merged: string (25 chars)
filepath: string (6-136 chars)
before_content: string (54-884k chars, nullable)
after_content: string (56-884k chars)
pr_author: string (3-21 chars)
previous_commit: string (40 chars)
pr_commit: string (40 chars)
comment: string (2-25.4k chars)
comment_author: string (3-29 chars)
__index_level_0__: int64 (0-5.1k)
repo_name: moby/moby
pr_number: 42528
pr_title: Jenkinsfile: add stage for Windows 2022 on containerd
pr_description: follow-up to https://github.com/moby/moby/pull/42527; relates to https://github.com/moby/moby/pull/41479
author: null
date_created: 2021-06-16 08:36:05+00:00
date_merged: 2021-06-28 07:35:03+00:00
filepath: Jenkinsfile
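The after_content below adds a windows2022containerd parameter, but this excerpt is truncated before the new stage itself appears. As a rough sketch only, a Windows 2022 + containerd stage modeled on the existing 'win-2022' stage might look like the following; the stage name and the DOCKER_WINDOWS_CONTAINERD_RUNTIME variable are illustrative assumptions, not code taken from the PR.

// Hypothetical sketch only; the stage name and containerd toggle are assumptions for illustration.
stage('win-2022-containerd') {
    when {
        beforeAgent true
        expression { params.windows2022containerd }
    }
    environment {
        DOCKER_BUILDKIT = '0'
        DOCKER_DUT_DEBUG = '1'
        // Assumed toggle for running the daemon under test on containerd.
        DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
        SKIP_VALIDATION_TESTS = '1'
        SOURCES_DRIVE = 'd'
        SOURCES_SUBDIR = 'gopath'
        TESTRUN_DRIVE = 'd'
        TESTRUN_SUBDIR = "CI"
        WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
        WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
    }
    agent {
        node {
            customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
            label 'windows-2022'
        }
    }
    stages {
        stage("Run tests") {
            steps {
                powershell '''
                $ErrorActionPreference = 'Stop'
                Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
                ./hack/ci/windows.ps1
                exit $LastExitCode
                '''
            }
        }
    }
}

Such a stage would presumably reuse hack/ci/windows.ps1 as the other Windows stages do, with the assumed environment toggle selecting the containerd runtime for the daemon under test.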
before_content:
#!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
after_content:
#!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| thaJeztah | 050929ab83c85812ad289ddc5305a857a510561f | 665de2e973b8061930e50426bb469228e3c66963 | Yes, I checked again a vanilla machine, but it's already 10.0.2348.51. 😿 | StefanScherer | 4,698 |
moby/moby | 42,528 | Jenkinsfile: add stage for Windows 2022 on containerd | follow-up to https://github.com/moby/moby/pull/42527
relates to https://github.com/moby/moby/pull/41479 | null | 2021-06-16 08:36:05+00:00 | 2021-06-28 07:35:03+00:00 | Jenkinsfile | #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
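                // s390x integration-cli only runs on branch (non-PR) builds, and only when the s390x parameter is enabled.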
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
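                // PowerPC (ppc64le) build, unit, and integration tests; the integration-cli suite has its own stage below.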
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
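                // arm64 build, unit, and integration tests; the integration-cli suite is skipped via TEST_SKIP_INTEGRATION_CLI.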
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                            # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                            # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
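                        // Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list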
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                            # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
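                        // Run the daemon under test and the integration tests with containerd as the Windows runtime;
                        // this is what distinguishes this stage from win-2022.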
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                            # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| thaJeztah | 050929ab83c85812ad289ddc5305a857a510561f | 665de2e973b8061930e50426bb469228e3c66963 | @thaJeztah so can you update this one to use `10.0.20295.1` now and update both of those to latest on #42527 when new version is available? | olljanat | 4,699 |
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
 | thaJeztah | 050929ab83c85812ad289ddc5305a857a510561f | 665de2e973b8061930e50426bb469228e3c66963 | Process isolation build IDs must _match_. If Host Build > Image Build, then Hyper-V isolation is the only option. And there are no base images for 20348 published yet to match the Windows Server 2022 Preview release on the Evaluation Center, so the latest Windows Server host we can use for process isolation is 20344 from the Windows Server Insiders Preview.
Edit: Sorry, some of this was already known; I didn't notice the fold in the comment history until after I wrote the comment.
tl;dr: 20344 is the latest Insider build available as ISO or VHDX. 20348 is the RTM build, currently only on the [Evaluation Center](https://www.microsoft.com/en-us/evalcenter/evaluate-windows-server-2022-preview), but that includes an Azure option as well as ISO. | TBBle | 4,700 |
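The constraint described in the comment above can be checked mechanically. Below is a minimal PowerShell sketch (not part of the PR; the tag value is only an example) that compares the host OS build with the build encoded in a servercore tag and reports which isolation mode is viable:

```powershell
# Sketch only: decide whether a servercore image can run with process isolation on this host.
# The tag value is an example; the CI stages pin their own WINDOWS_BASE_IMAGE_TAG.
$imageTag   = '10.0.20295.1'
$imageBuild = [int](($imageTag -split '\.')[2])
$hostBuild  = [int](Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion').CurrentBuildNumber

if ($hostBuild -eq $imageBuild) {
    Write-Host "Host and image are both build $hostBuild: process isolation works."
} elseif ($hostBuild -gt $imageBuild) {
    Write-Host "Host build $hostBuild is newer than image build $imageBuild: Hyper-V isolation (--isolation=hyperv) is required."
} else {
    Write-Host "Image build $imageBuild is newer than host build $hostBuild: the image cannot run on this host."
}
```

This mismatch rule is presumably why the win-2022 stages pin `WINDOWS_BASE_IMAGE_TAG` to a specific Insider build rather than a generic 2022 tag: the `windows-2022` agents can only use process isolation against an image whose build matches their own.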
moby/moby | 42,528 | Jenkinsfile: add stage for Windows 2022 on containerd | follow-up to https://github.com/moby/moby/pull/42527
relates to https://github.com/moby/moby/pull/41479 | null | 2021-06-16 08:36:05+00:00 | 2021-06-28 07:35:03+00:00 | Jenkinsfile | #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
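                        // Note: "${BRANCH_NAME%%-*}" strips the first "-" and everything after it, so multibranch
                        // job names such as "PR-12345" reduce to "PR" in the check below.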
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report.xml bundles/go-test-report.json bundles/profile.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
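                                # wait for all background test runs and propagate a non-zero exit code if any of them failed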
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-1804' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
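                                # Force TLS 1.2 for the download below; GitHub rejects the older TLS versions this agent may default to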
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
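                                # docker-ci-zap is fetched so the CI script can forcibly clean up Windows layer/data directories that normal deletion cannot remove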
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
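                        // For example (a convenience command, not used by this pipeline):
                        //   curl -fsSL https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list | jq -r '.tags[]'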
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
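                        // Runs the daemon under test with the containerd-backed runtime (picked up by hack/ci/windows.ps1);
                        // this is what distinguishes this stage from the plain 'win-2022' stage above.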
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
                                # archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| thaJeztah | 050929ab83c85812ad289ddc5305a857a510561f | 665de2e973b8061930e50426bb469228e3c66963 | I updated this PR to only add a new stage with the `DOCKER_WINDOWS_CONTAINERD_RUNTIME` env variable set, and we can use https://github.com/moby/moby/pull/42527 to update to more recent versions once things become available. | thaJeztah | 4,701 |
moby/moby | 42,527 | Jenkinsfile: update Windows 2022 insider to latest tag (10.0.20348.1) | Tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
| null | 2021-06-16 08:31:54+00:00 | 2021-08-10 08:49:04+00:00 | Jenkinsfile | #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report*.xml bundles/go-test-report*.json bundles/profile*.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
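// BuildKit cannot build Windows containers, so the CI script's image builds use the classic builder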
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
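// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list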
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
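// Run the daemon under test with containerd as the runtime; this is what distinguishes this stage from win-2022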
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
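// CROSS=true bakes the cross-compilation toolchains into the dev image for the Cross stage below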
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report*.xml bundles/go-test-report*.json bundles/profile*.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
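# ip_vs is needed by the swarm-mode (IPVS) service load-balancing tests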
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
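// Run the daemon under test with containerd as the runtime; this is what distinguishes this stage from win-2022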
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| thaJeztah | f91b0d39ed7cb8699be9a3343d3c663ba06bc831 | 919f2ef7641d2139211aafe12abbbf1f81689c01 | ```suggestion
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
``` | StefanScherer | 4,702 |
moby/moby | 42,527 | Jenkinsfile: update Windows 2022 insider to latest tag (10.0.20348.1) | Tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
| null | 2021-06-16 08:31:54+00:00 | 2021-08-10 08:49:04+00:00 | Jenkinsfile | #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report*.xml bundles/go-test-report*.json bundles/profile*.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
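# ip_vs is needed by the swarm-mode (IPVS) service load-balancing tests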
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20295.1'
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| #!groovy
pipeline {
agent none
options {
buildDiscarder(logRotator(daysToKeepStr: '30'))
timeout(time: 2, unit: 'HOURS')
timestamps()
}
parameters {
booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
booleanParam(name: 'windowsRS1', defaultValue: false, description: 'Windows 2016 (RS1) Build/Test')
booleanParam(name: 'windowsRS5', defaultValue: true, description: 'Windows 2019 (RS5) Build/Test')
booleanParam(name: 'windows2022', defaultValue: true, description: 'Windows 2022 (SAC) Build/Test')
booleanParam(name: 'windows2022containerd', defaultValue: true, description: 'Windows 2022 (SAC) with containerd Build/Test')
booleanParam(name: 'dco', defaultValue: true, description: 'Run the DCO check')
}
environment {
DOCKER_BUILDKIT = '1'
DOCKER_EXPERIMENTAL = '1'
DOCKER_GRAPHDRIVER = 'overlay2'
APT_MIRROR = 'cdn-fastly.deb.debian.org'
CHECK_CONFIG_COMMIT = '2b0755b936416834e14208c6c37b36977e67ea35'
TESTDEBUG = '0'
TIMEOUT = '120m'
}
stages {
stage('pr-hack') {
when { changeRequest() }
steps {
script {
echo "Workaround for PR auto-cancel feature. Borrowed from https://issues.jenkins-ci.org/browse/JENKINS-43353"
def buildNumber = env.BUILD_NUMBER as int
if (buildNumber > 1) milestone(buildNumber - 1)
milestone(buildNumber)
}
}
}
stage('DCO-check') {
when {
beforeAgent true
expression { params.dco }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
steps {
sh '''
docker run --rm \
-v "$WORKSPACE:/workspace" \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
alpine sh -c 'apk add --no-cache -q bash git openssh-client && cd /workspace && hack/validate/dco'
'''
}
}
stage('Build') {
parallel {
stage('unit-validate') {
when {
beforeAgent true
expression { params.unit_validate }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
environment {
// On master ("non-pull-request"), force running some validation checks (vendor, swagger),
// even if no files were changed. This allows catching problems caused by pull-requests
// that were merged out-of-sequence.
TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
}
}
stage("Validate") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/default
'''
}
}
stage("Docker-py") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-docker-py
'''
}
post {
always {
junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=docker-py
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
}
}
stage("Static") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh binary
'''
}
}
stage("Cross") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh cross
'''
}
}
// needs to be last stage that calls make.sh for the junit report to work
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Validate vendor") {
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_FORCE_VALIDATE \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/validate/vendor
'''
}
}
}
post {
always {
sh '''
echo 'Ensuring container killed.'
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo 'Chowning /workspace to jenkins user'
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=unit
echo "Creating ${bundleName}-bundles.tar.gz"
tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report*.xml bundles/go-test-report*.json bundles/profile*.out
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('amd64') {
when {
beforeAgent true
expression { params.amd64 }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
# todo: include ip_vs in base image
sudo modprobe ip_vs
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Run tests") {
steps {
sh '''#!/bin/bash
# bash is needed so 'jobs -p' works properly
# it also accepts setting inline envvars for functions without explicitly exporting
set -x
run_tests() {
[ -n "$TESTDEBUG" ] && rm= || rm=--rm;
docker run $rm -t --privileged \
-v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name "$CONTAINER_NAME" \
-e KEEPBUNDLE=1 \
-e TESTDEBUG \
-e TESTFLAGS \
-e TEST_SKIP_INTEGRATION \
-e TEST_SKIP_INTEGRATION_CLI \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
"$1" \
test-integration
}
trap "exit" INT TERM
trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
CONTAINER_NAME=docker-pr$BUILD_NUMBER
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
--name ${CONTAINER_NAME}-build \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary
# flaky + integration
TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
# integration-cli first set
TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
# integration-cli second set
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
c=0
for job in $(jobs -p); do
wait ${job} || c=$?
done
exit $c
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
[ -n "$cids" ] && docker rm -vf $cids || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('rootless') {
when {
beforeAgent true
expression { params.rootless }
}
agent { label 'amd64 && ubuntu-1804 && overlay2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_ROOTLESS = '1'
TEST_SKIP_INTEGRATION_CLI = '1'
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_ROOTLESS \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-rootless
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('cgroup2') {
when {
beforeAgent true
expression { params.cgroup2 }
}
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration tests") {
environment {
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
}
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_SYSTEMD \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=amd64-cgroup2
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.s390x }
}
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('s390x integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.s390x }
}
agent { label 's390x-ubuntu-2004' }
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=s390x-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le') {
when {
beforeAgent true
// Skip this stage on PRs unless the checkbox is selected
anyOf {
not { changeRequest() }
expression { params.ppc64le }
}
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('ppc64le integration-cli') {
when {
beforeAgent true
not { changeRequest() }
expression { params.ppc64le }
}
agent { label 'ppc64le-ubuntu-1604' }
// ppc64le machines run on Docker 18.06, and buildkit has some
// bugs on that version. Build and use buildx instead.
environment {
USE_BUILDX = '1'
DOCKER_BUILDKIT = '0'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh '''
make bundles/buildx
bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
'''
}
}
stage("Integration-cli tests") {
environment { TEST_SKIP_INTEGRATION = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TEST_SKIP_INTEGRATION \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=ppc64le-integration-cli
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('arm64') {
when {
beforeAgent true
expression { params.arm64 }
}
agent { label 'arm64 && ubuntu-2004' }
environment {
TEST_SKIP_INTEGRATION_CLI = '1'
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
sh '''
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
&& bash ${WORKSPACE}/check-config.sh || true
'''
}
}
stage("Build dev image") {
steps {
sh 'docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .'
}
}
stage("Unit tests") {
steps {
sh '''
sudo modprobe ip6table_filter
'''
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/test/unit
'''
}
post {
always {
junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
}
}
}
stage("Integration tests") {
environment { TEST_SKIP_INTEGRATION_CLI = '1' }
steps {
sh '''
docker run --rm -t --privileged \
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
--name docker-pr$BUILD_NUMBER \
-e DOCKER_EXPERIMENTAL \
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-e DOCKER_GRAPHDRIVER \
-e TESTDEBUG \
-e TEST_SKIP_INTEGRATION_CLI \
-e TIMEOUT \
-e VALIDATE_REPO=${GIT_URL} \
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
docker:${GIT_COMMIT} \
hack/make.sh \
dynbinary \
test-integration
'''
}
post {
always {
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
}
}
}
}
post {
always {
sh '''
echo "Ensuring container killed."
docker rm -vf docker-pr$BUILD_NUMBER || true
'''
sh '''
echo "Chowning /workspace to jenkins user"
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
'''
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
sh '''
bundleName=arm64-integration
echo "Creating ${bundleName}-bundles.tar.gz"
# exclude overlay2 directories
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
'''
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS1') {
when {
beforeAgent true
// Skip this stage on PRs unless the windowsRS1 checkbox is selected
anyOf {
not { changeRequest() }
expression { params.windowsRS1 }
}
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2016'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2016'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS1-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-RS5') {
when {
beforeAgent true
expression { params.windowsRS5 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore'
WINDOWS_BASE_IMAGE_TAG = 'ltsc2019'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2019'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
powershell '''
cd $env:WORKSPACE
$bundleName="windowsRS5-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022') {
when {
beforeAgent true
expression { params.windows2022 }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
stage('win-2022-c8d') {
when {
beforeAgent true
expression { params.windows2022containerd }
}
environment {
DOCKER_BUILDKIT = '0'
DOCKER_DUT_DEBUG = '1'
SKIP_VALIDATION_TESTS = '1'
SOURCES_DRIVE = 'd'
SOURCES_SUBDIR = 'gopath'
TESTRUN_DRIVE = 'd'
TESTRUN_SUBDIR = "CI"
// TODO switch to mcr.microsoft.com/windows/servercore:2022 once published
WINDOWS_BASE_IMAGE = 'mcr.microsoft.com/windows/servercore/insider'
// Available tags can be found at https://mcr.microsoft.com/v2/windows/servercore/insider/tags/list
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
DOCKER_WINDOWS_CONTAINERD_RUNTIME = '1'
}
agent {
node {
customWorkspace 'd:\\gopath\\src\\github.com\\docker\\docker'
label 'windows-2022'
}
}
stages {
stage("Print info") {
steps {
sh 'docker version'
sh 'docker info'
}
}
stage("Run tests") {
steps {
powershell '''
$ErrorActionPreference = 'Stop'
Invoke-WebRequest https://github.com/moby/docker-ci-zap/blob/master/docker-ci-zap.exe?raw=true -OutFile C:/Windows/System32/docker-ci-zap.exe
./hack/ci/windows.ps1
exit $LastExitCode
'''
}
}
}
post {
always {
junit testResults: 'bundles/junit-report-*.xml', allowEmptyResults: true
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.zip') {
powershell '''
cd $env:WORKSPACE
$bundleName="win-2022-c8d-integration"
Write-Host -ForegroundColor Green "Creating ${bundleName}-bundles.zip"
# archiveArtifacts does not support env-vars, so save the artifacts in a fixed location
Compress-Archive -Path "bundles/CIDUT.out", "bundles/CIDUT.err", "bundles/junit-report-*.xml" -CompressionLevel Optimal -DestinationPath "${bundleName}-bundles.zip"
'''
archiveArtifacts artifacts: '*-bundles.zip', allowEmptyArchive: true
}
}
cleanup {
sh 'make clean'
deleteDir()
}
}
}
}
}
}
}
| thaJeztah | f91b0d39ed7cb8699be9a3343d3c663ba06bc831 | 919f2ef7641d2139211aafe12abbbf1f81689c01 | ```suggestion
WINDOWS_BASE_IMAGE_TAG = '10.0.20348.1'
``` | StefanScherer | 4,703 |
moby/moby | 42,524 | Crosscompilation for s390x and ppc64le | **- What I did**
Update the build environment to provide cross-compilation for linux/s390x and linux/ppc64le.
**- How I did it**
Modify the runtime-dev-cross-false and runtime-dev-cross-true Docker build stages.
Use bullseye to provide libseccomp-dev, because the s390x and ppc64le packages in buster-backports conflict with the other architectures.
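A rough sketch of the kind of change described, following the pattern of the existing `cross-true` stage in the Dockerfile below; the stage layout, package set, and the bullseye handling in the merged change may differ, so treat this as illustrative only:
```dockerfile
# Illustrative sketch: extend the cross toolchain with s390x and ppc64le
# (Debian's name for the latter is "ppc64el"), mirroring the existing arm targets.
FROM base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64 \
 && dpkg --add-architecture armel \
 && dpkg --add-architecture armhf \
 && dpkg --add-architecture ppc64el \
 && dpkg --add-architecture s390x
RUN apt-get update && apt-get install -y --no-install-recommends \
        crossbuild-essential-arm64 \
        crossbuild-essential-armel \
        crossbuild-essential-armhf \
        crossbuild-essential-ppc64el \
        crossbuild-essential-s390x
# libseccomp-dev would then be taken from bullseye instead of buster-backports,
# since the s390x/ppc64le backports packages conflict with the other architectures.
```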
**- How to verify it**
```
$ make DOCKER_CROSSPLATFORMS="linux/s390x linux/ppc64le" cross
$ ls -l bundles/cross/linux/{s390x,ppc64le}-daemon
$ file bundles/cross/linux/{s390x,ppc64le}-daemon/dockerd-dev
```
SO FAR THE RESULTING BINARIES ARE UNTESTED
**- Description for the changelog**
Add cross compilation for s390x and ppc64le
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-06-15 10:25:29+00:00 | 2021-06-24 19:23:28+00:00 | Dockerfile | # syntax=docker/dockerfile:1.2
ARG CROSS="false"
ARG SYSTEMD="false"
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ARG GO_VERSION=1.16.5
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG DOCKER_BUILDTAGS="apparmor seccomp"
ARG BASE_DEBIAN_DISTRO="buster"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
# FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \
&& echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first one is a recent version that
# supports both schema 1 and 2 manifests. The second one is an older version that
# only supports schema1 manifests. This allows integration-cli tests to cover
# push/pull with both schema1 and schema2 manifests.
# The old version of the registry is not working on arm64, so installation is
# skipped on that architecture.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_COMMIT" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf
FROM cross-${CROSS} as dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev/buster-backports \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libseccomp-dev:arm64/buster-backports \
libseccomp-dev:armel/buster-backports \
libseccomp-dev:armhf/buster-backports
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS tomll
ARG GOTOML_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \
. /tmp/install/tomll.installer && PREFIX=/build install_tomll
FROM base AS vndr
ARG VNDR_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh vndr
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh containerd
FROM base AS golangci_lint
ARG GOLANGCI_LINT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh golangci_lint
FROM base AS gotestsum
ARG GOTESTSUM_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh gotestsum
FROM base AS shfmt
ARG SHFMT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh shfmt
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh dockercli
FROM runtime-dev AS runc
ARG RUNC_COMMIT
ARG RUNC_BUILDTAGS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh runc
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_COMMIT
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh tini
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh rootlesskit
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64
FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64
FROM scratch AS vpnkit
COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
bzip2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
RUN pip3 install yamllint==1.26.1
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
--mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \
hack/make.sh cross
FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
FROM dev AS final
COPY . /go/src/github.com/docker/docker
| # syntax=docker/dockerfile:1.2
ARG CROSS="false"
ARG SYSTEMD="false"
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ARG GO_VERSION=1.16.5
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG DOCKER_BUILDTAGS="apparmor seccomp"
ARG BASE_DEBIAN_DISTRO="buster"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
# FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \
&& echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first one is a recent version that
# supports both schema 1 and 2 manifests. The second one is an older version that
# only supports schema1 manifests. This allows integration-cli tests to cover
# push/pull with both schema1 and schema2 manifests.
# The old version of the registry is not working on arm64, so installation is
# skipped on that architecture.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_COMMIT" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
crossbuild-essential-ppc64el \
crossbuild-essential-s390x
FROM cross-${CROSS} as dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev/buster-backports \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libapparmor-dev:ppc64el \
libapparmor-dev:s390x
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS tomll
ARG GOTOML_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \
. /tmp/install/tomll.installer && PREFIX=/build install_tomll
FROM base AS vndr
ARG VNDR_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh vndr
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh containerd
FROM base AS golangci_lint
ARG GOLANGCI_LINT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh golangci_lint
FROM base AS gotestsum
ARG GOTESTSUM_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh gotestsum
FROM base AS shfmt
ARG SHFMT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh shfmt
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh dockercli
FROM runtime-dev AS runc
ARG RUNC_COMMIT
ARG RUNC_BUILDTAGS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh runc
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_COMMIT
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh tini
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh rootlesskit
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64
FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64
FROM scratch AS vpnkit
COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
bzip2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
RUN pip3 install yamllint==1.26.1
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
--mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \
hack/make.sh cross
FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
FROM dev AS final
COPY . /go/src/github.com/docker/docker
| fredericdalleau | 301be64d4181f058908f585cb0af8cb42688d3b3 | 7d471d88a0723be1066ab630df1427bf2a08a6b8 | For consistency, I think these should be in the same (sorted) order as those above:
```suggestion
libapparmor-dev:armhf \
libapparmor-dev:s390x \
libapparmor-dev:ppc64el
``` | tianon | 4,704 |
moby/moby | 42,524 | Crosscompilation for s390x and ppc64le | **- What I did**
Update build environment to provide cross compilation for linux/s390x and linux/ppc64le
**- How I did it**
Modify runtime-dev-cross-false and runtime-dev-cross-true docker environments.
Use bullseye to provide libseccomp-dev, since the s390x and ppc64le packages in buster-backports conflict with the other architectures.
**- How to verify it**
```
$ make DOCKER_CROSSPLATFORMS="linux/s390x linux/ppc64le" cross
$ ls -l bundles/cross/linux/{s390x,ppc64le}-daemon
$ file bundles/cross/linux/{s390x,ppc64le}-daemon/dockerd-dev
```
SO FAR THE RESULTING BINARIES ARE UNTESTED
**- Description for the changelog**
Add cross compilation for s390x and ppc64le
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-06-15 10:25:29+00:00 | 2021-06-24 19:23:28+00:00 | Dockerfile | # syntax=docker/dockerfile:1.2
ARG CROSS="false"
ARG SYSTEMD="false"
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ARG GO_VERSION=1.16.5
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG DOCKER_BUILDTAGS="apparmor seccomp"
ARG BASE_DEBIAN_DISTRO="buster"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
# FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \
&& echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first one is a recent version that
# supports both schema 1 and 2 manifests. The second one is an older version that
# only supports schema1 manifests. This allows integration-cli tests to cover
# push/pull with both schema1 and schema2 manifests.
# The old version of the registry is not working on arm64, so installation is
# skipped on that architecture.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_COMMIT" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf
FROM cross-${CROSS} as dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev/buster-backports \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libseccomp-dev:arm64/buster-backports \
libseccomp-dev:armel/buster-backports \
libseccomp-dev:armhf/buster-backports
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS tomll
ARG GOTOML_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \
. /tmp/install/tomll.installer && PREFIX=/build install_tomll
FROM base AS vndr
ARG VNDR_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh vndr
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh containerd
FROM base AS golangci_lint
ARG GOLANGCI_LINT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh golangci_lint
FROM base AS gotestsum
ARG GOTESTSUM_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh gotestsum
FROM base AS shfmt
ARG SHFMT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh shfmt
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh dockercli
FROM runtime-dev AS runc
ARG RUNC_COMMIT
ARG RUNC_BUILDTAGS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh runc
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_COMMIT
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh tini
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh rootlesskit
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64
FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64
FROM scratch AS vpnkit
COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
bzip2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
RUN pip3 install yamllint==1.26.1
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
--mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \
hack/make.sh cross
FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
FROM dev AS final
COPY . /go/src/github.com/docker/docker
| # syntax=docker/dockerfile:1.2
ARG CROSS="false"
ARG SYSTEMD="false"
# IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
ARG GO_VERSION=1.16.5
ARG DEBIAN_FRONTEND=noninteractive
ARG VPNKIT_VERSION=0.5.0
ARG DOCKER_BUILDTAGS="apparmor seccomp"
ARG BASE_DEBIAN_DISTRO="buster"
ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
FROM ${GOLANG_IMAGE} AS base
RUN echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
ARG APT_MIRROR
RUN sed -ri "s/(httpredir|deb).debian.org/${APT_MIRROR:-deb.debian.org}/g" /etc/apt/sources.list \
&& sed -ri "s/(security).debian.org/${APT_MIRROR:-security.debian.org}/g" /etc/apt/sources.list
ENV GO111MODULE=off
FROM base AS criu
ARG DEBIAN_FRONTEND
ADD --chmod=0644 https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/Release.key /etc/apt/trusted.gpg.d/criu.gpg.asc
# FIXME: temporarily doing a manual chmod as workaround for https://github.com/moby/buildkit/issues/2114
RUN --mount=type=cache,sharing=locked,id=moby-criu-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-criu-aptcache,target=/var/cache/apt \
chmod 0644 /etc/apt/trusted.gpg.d/criu.gpg.asc \
&& echo 'deb https://download.opensuse.org/repositories/devel:/tools:/criu/Debian_10/ /' > /etc/apt/sources.list.d/criu.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends criu \
&& install -D /usr/sbin/criu /build/criu
FROM base AS registry
WORKDIR /go/src/github.com/docker/distribution
# Install two versions of the registry. The first one is a recent version that
# supports both schema 1 and 2 manifests. The second one is an older version that
# only supports schema1 manifests. This allows integration-cli tests to cover
# push/pull with both schema1 and schema2 manifests.
# The old version of the registry is not working on arm64, so installation is
# skipped on that architecture.
ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd
ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/docker/distribution.git . \
&& git checkout -q "$REGISTRY_COMMIT" \
&& GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \
go build -buildmode=pie -o /build/registry-v2 github.com/docker/distribution/cmd/registry \
&& case $(dpkg --print-architecture) in \
amd64|armhf|ppc64*|s390x) \
git checkout -q "$REGISTRY_COMMIT_SCHEMA1"; \
GOPATH="/go/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH"; \
go build -buildmode=pie -o /build/registry-v2-schema1 github.com/docker/distribution/cmd/registry; \
;; \
esac
FROM base AS swagger
WORKDIR $GOPATH/src/github.com/go-swagger/go-swagger
# Install go-swagger for validating swagger.yaml
# This is https://github.com/kolyshkin/go-swagger/tree/golang-1.13-fix
# TODO: move to under moby/ or fix upstream go-swagger to work for us.
ENV GO_SWAGGER_COMMIT 5e6cb12f7c82ce78e45ba71fa6cb1928094db050
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=tmpfs,target=/go/src/ \
set -x \
&& git clone https://github.com/kolyshkin/go-swagger.git . \
&& git checkout -q "$GO_SWAGGER_COMMIT" \
&& go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-frozen-images-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
curl \
jq
# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
COPY contrib/download-frozen-image-v2.sh /
ARG TARGETARCH
RUN /download-frozen-image-v2.sh /build \
buildpack-deps:buster@sha256:d0abb4b1e5c664828b93e8b6ac84d10bce45ee469999bef88304be04a2709491 \
busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
debian:bullseye@sha256:7190e972ab16aefea4d758ebe42a293f4e5c5be63595f4d03a5b9bf6839a4344 \
hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
FROM base AS cross-false
FROM --platform=linux/amd64 base AS cross-true
ARG DEBIAN_FRONTEND
RUN dpkg --add-architecture arm64
RUN dpkg --add-architecture armel
RUN dpkg --add-architecture armhf
RUN dpkg --add-architecture ppc64el
RUN dpkg --add-architecture s390x
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
crossbuild-essential-arm64 \
crossbuild-essential-armel \
crossbuild-essential-armhf \
crossbuild-essential-ppc64el \
crossbuild-essential-s390x
FROM cross-${CROSS} as dev-base
FROM dev-base AS runtime-dev-cross-false
ARG DEBIAN_FRONTEND
RUN echo 'deb http://deb.debian.org/debian buster-backports main' > /etc/apt/sources.list.d/backports.list
RUN --mount=type=cache,sharing=locked,id=moby-cross-false-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-false-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
binutils-mingw-w64 \
g++-mingw-w64-x86-64 \
libapparmor-dev \
libbtrfs-dev \
libdevmapper-dev \
libseccomp-dev/buster-backports \
libsystemd-dev \
libudev-dev
FROM --platform=linux/amd64 runtime-dev-cross-false AS runtime-dev-cross-true
ARG DEBIAN_FRONTEND
# These crossbuild packages rely on gcc-<arch>, but this doesn't want to install
# on non-amd64 systems.
# Additionally, the crossbuild-amd64 is currently only on debian:buster, so
# other architectures cannot crossbuild amd64.
RUN --mount=type=cache,sharing=locked,id=moby-cross-true-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-cross-true-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libapparmor-dev:arm64 \
libapparmor-dev:armel \
libapparmor-dev:armhf \
libapparmor-dev:ppc64el \
libapparmor-dev:s390x
FROM runtime-dev-cross-${CROSS} AS runtime-dev
FROM base AS tomll
ARG GOTOML_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install/tomll.installer,target=/tmp/install/tomll.installer \
. /tmp/install/tomll.installer && PREFIX=/build install_tomll
FROM base AS vndr
ARG VNDR_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh vndr
FROM dev-base AS containerd
ARG DEBIAN_FRONTEND
RUN --mount=type=cache,sharing=locked,id=moby-containerd-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-containerd-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
libbtrfs-dev
ARG CONTAINERD_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh containerd
FROM base AS golangci_lint
ARG GOLANGCI_LINT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh golangci_lint
FROM base AS gotestsum
ARG GOTESTSUM_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh gotestsum
FROM base AS shfmt
ARG SHFMT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh shfmt
FROM dev-base AS dockercli
ARG DOCKERCLI_CHANNEL
ARG DOCKERCLI_VERSION
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh dockercli
FROM runtime-dev AS runc
ARG RUNC_COMMIT
ARG RUNC_BUILDTAGS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh runc
FROM dev-base AS tini
ARG DEBIAN_FRONTEND
ARG TINI_COMMIT
RUN --mount=type=cache,sharing=locked,id=moby-tini-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-tini-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
cmake \
vim-common
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh tini
FROM dev-base AS rootlesskit
ARG ROOTLESSKIT_COMMIT
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=bind,src=hack/dockerfile/install,target=/tmp/install \
PREFIX=/build /tmp/install/install.sh rootlesskit
COPY ./contrib/dockerd-rootless.sh /build
COPY ./contrib/dockerd-rootless-setuptool.sh /build
FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64
FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64
FROM scratch AS vpnkit
COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64
# TODO: Some of this is only really needed for testing, it would be nice to split this up
FROM runtime-dev AS dev-systemd-false
ARG DEBIAN_FRONTEND
RUN groupadd -r docker
RUN useradd --create-home --gid docker unprivilegeduser \
&& mkdir -p /home/unprivilegeduser/.local/share/docker \
&& chown -R unprivilegeduser /home/unprivilegeduser
# Let us use a .bashrc file
RUN ln -sfv /go/src/github.com/docker/docker/.bashrc ~/.bashrc
# Activate bash completion and include Docker's completion if mounted with DOCKER_BASH_COMPLETION_PATH
RUN echo "source /usr/share/bash-completion/bash_completion" >> /etc/bash.bashrc
RUN ln -s /usr/local/completion/bash/docker /etc/bash_completion.d/docker
RUN ldconfig
# This should only install packages that are specifically needed for the dev environment and nothing else
# Do you really need to add another package here? Can it be done in a different build stage?
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
apparmor \
aufs-tools \
bash-completion \
bzip2 \
iptables \
jq \
libcap2-bin \
libnet1 \
libnl-3-200 \
libprotobuf-c1 \
net-tools \
patch \
pigz \
python3-pip \
python3-setuptools \
python3-wheel \
sudo \
thin-provisioning-tools \
uidmap \
vim \
vim-common \
xfsprogs \
xz-utils \
zip
# Switch to use iptables instead of nftables (to match the CI hosts)
# TODO use some kind of runtime auto-detection instead if/when nftables is supported (https://github.com/moby/moby/issues/26824)
RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
&& update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
&& update-alternatives --set arptables /usr/sbin/arptables-legacy || true
RUN pip3 install yamllint==1.26.1
COPY --from=dockercli /build/ /usr/local/cli
COPY --from=frozen-images /build/ /docker-frozen-images
COPY --from=swagger /build/ /usr/local/bin/
COPY --from=tomll /build/ /usr/local/bin/
COPY --from=tini /build/ /usr/local/bin/
COPY --from=registry /build/ /usr/local/bin/
COPY --from=criu /build/ /usr/local/bin/
COPY --from=vndr /build/ /usr/local/bin/
COPY --from=gotestsum /build/ /usr/local/bin/
COPY --from=golangci_lint /build/ /usr/local/bin/
COPY --from=shfmt /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
ENV PATH=/usr/local/cli:$PATH
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
WORKDIR /go/src/github.com/docker/docker
VOLUME /var/lib/docker
VOLUME /home/unprivilegeduser/.local/share/docker
# Wrap all commands in the "docker-in-docker" script to allow nested containers
ENTRYPOINT ["hack/dind"]
FROM dev-systemd-false AS dev-systemd-true
RUN --mount=type=cache,sharing=locked,id=moby-dev-aptlib,target=/var/lib/apt \
--mount=type=cache,sharing=locked,id=moby-dev-aptcache,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
dbus \
dbus-user-session \
systemd \
systemd-sysv
RUN mkdir -p hack \
&& curl -o hack/dind-systemd https://raw.githubusercontent.com/AkihiroSuda/containerized-systemd/b70bac0daeea120456764248164c21684ade7d0d/docker-entrypoint.sh \
&& chmod +x hack/dind-systemd
ENTRYPOINT ["hack/dind-systemd"]
FROM dev-systemd-${SYSTEMD} AS dev
FROM runtime-dev AS binary-base
ARG DOCKER_GITCOMMIT=HEAD
ENV DOCKER_GITCOMMIT=${DOCKER_GITCOMMIT}
ARG VERSION
ENV VERSION=${VERSION}
ARG PLATFORM
ENV PLATFORM=${PLATFORM}
ARG PRODUCT
ENV PRODUCT=${PRODUCT}
ARG DEFAULT_PRODUCT_LICENSE
ENV DEFAULT_PRODUCT_LICENSE=${DEFAULT_PRODUCT_LICENSE}
ARG DOCKER_BUILDTAGS
ENV DOCKER_BUILDTAGS="${DOCKER_BUILDTAGS}"
ENV PREFIX=/build
# TODO: This is here because hack/make.sh binary copies these extras binaries
# from $PATH into the bundles dir.
# It would be nice to handle this in a different way.
COPY --from=tini /build/ /usr/local/bin/
COPY --from=runc /build/ /usr/local/bin/
COPY --from=containerd /build/ /usr/local/bin/
COPY --from=rootlesskit /build/ /usr/local/bin/
COPY --from=vpnkit /build/ /usr/local/bin/
WORKDIR /go/src/github.com/docker/docker
FROM binary-base AS build-binary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh binary
FROM binary-base AS build-dynbinary
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
hack/make.sh dynbinary
FROM binary-base AS build-cross
ARG DOCKER_CROSSPLATFORMS
RUN --mount=type=cache,target=/root/.cache/go-build \
--mount=type=bind,target=/go/src/github.com/docker/docker \
--mount=type=tmpfs,target=/go/src/github.com/docker/docker/autogen \
hack/make.sh cross
FROM scratch AS binary
COPY --from=build-binary /build/bundles/ /
FROM scratch AS dynbinary
COPY --from=build-dynbinary /build/bundles/ /
FROM scratch AS cross
COPY --from=build-cross /build/bundles/ /
FROM dev AS final
COPY . /go/src/github.com/docker/docker
| fredericdalleau | 301be64d4181f058908f585cb0af8cb42688d3b3 | 7d471d88a0723be1066ab630df1427bf2a08a6b8 | Thanks, fixed now | fredericdalleau | 4,705 |
moby/moby | 42,520 | Remove LCOW (step 5): volumes/mounts: remove LCOW code (alternative) | relates to (part of) https://github.com/moby/moby/pull/42170
alternative for https://github.com/moby/moby/pull/42517
closes https://github.com/moby/moby/pull/42517
This alternative to https://github.com/moby/moby/pull/42517 preserves the LCOW parsing code for mounts (in case we end up supporting LCOW through containerd's LCOW implementation). By default, a native parser is now used, matching the host's (and thus the container's) platform.
After taking care of the above, tests have also been split per driver, and some refactoring was done to improve the tests (making it possible to run them in parallel).
Details below:
### volume/mounts: move some code to correct location, and minor linting/formatting
- Remove the windowsparser.HasResource() override, as it was the same on both
Windows and Linux
- Move the rxLCOWDestination to the lcowParser code
- Move the rwModes variable to a generic (non-platform-specific) file, as it's
used both for the windowsParser and the linuxParser
- Some minor formatting and linting changes
### volume/mounts: remove "containerOS" argument from NewParser (LCOW code)
This changes mounts.NewParser() to create a parser for the current operating system,
instead of one specific to a (possibly non-matching, in case of LCOW) OS.
With the OS-specific handling being removed, the "OS" parameter is also removed
from `daemon.verifyContainerSettings()`, and various other container-related
functions.
### Split tests per parser
Have tests per-parser, instead of grouping all tests in a single test (with a
test-set for each parser inside that test). All tests have been kept, at the
cost of some duplication in the test-cases that overlapped / were the same
for multiple parsers.
### Add constructors for each parser
This adds constructors for the Linux, Windows, and LCOW parsers, so that these
parsers can be used externally, instead of only through `NewParser()`.
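For illustration, constructing a parser directly might look like this (a minimal sketch; it assumes the constructors are exported as `NewLinuxParser` / `NewWindowsParser` / `NewLCOWParser`, and only uses the `Parser` interface methods shown in this PR):
```go
package main

import (
	"fmt"

	"github.com/docker/docker/volume/mounts"
)

func main() {
	// Pick the Linux parser explicitly instead of relying on NewParser(),
	// which selects the parser matching the host platform.
	p := mounts.NewLinuxParser()

	mp, err := p.ParseMountRaw("/src:/dst:ro", "")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(mp.Source, mp.Destination, mp.RW)
}
```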
### Don't use global variable for fileinfoprovider
The code used a global / package-level `currentFileInfoProvider` that was used
to stub the actual fileInfoProvider during tests. Using a package-level variable
for this could cause issues when trying to run tests in parallel, as well as
making the tests more complicated (tests had to reset the provider in a defer).
Now that we have a constructor for each driver, we can set up the fileinfoprovider
in those constructors, and stub the implementation for specific instances, instead
of globally.
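A rough sketch of the difference (simplified, illustrative types; not the exact moby code):
```go
package mounts

// fileInfoProvider abstracts the filesystem lookups the Windows parser needs,
// so tests can substitute a fake.
type fileInfoProvider interface {
	fileInfo(path string) (exists, isDir bool, err error)
}

type defaultFileInfoProvider struct{}

func (defaultFileInfoProvider) fileInfo(string) (bool, bool, error) {
	return true, true, nil // the real provider would call os.Stat here
}

type fakeFileInfoProvider struct{ exists bool }

func (f fakeFileInfoProvider) fileInfo(string) (bool, bool, error) {
	return f.exists, false, nil
}

// windowsParser owns its provider instead of consulting a package-level
// currentFileInfoProvider variable.
type windowsParser struct{ fi fileInfoProvider }

// A test can stub a single parser instance; other (possibly parallel) tests
// keep using the real defaultFileInfoProvider untouched, and nothing needs
// to be reset in a defer.
func newTestParser(exists bool) *windowsParser {
	return &windowsParser{fi: fakeFileInfoProvider{exists: exists}}
}
```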
### Use sub-tests, and use gotest.tools
These tests were running many test-cases, which required a lot of glue code to
construct useful errors if "one of those" failed.
Regular sub-tests are now used instead, to make debugging easier, and gotest.tools
takes care of producing useful errors.
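As a rough illustration of the pattern (not the actual test code from this PR; it assumes the gotest.tools/v3 module and the `NewLinuxParser` constructor added above):
```go
package mounts

import (
	"testing"

	"gotest.tools/v3/assert"
)

func TestParseMountRawSubtests(t *testing.T) {
	tests := []struct {
		input    string
		expected string
	}{
		{input: "/src:/dst", expected: "/dst"},
		{input: "/src:/dst:ro", expected: "/dst"},
	}
	parser := NewLinuxParser()
	for _, tc := range tests {
		tc := tc
		// One sub-test per case: failures point at the exact input,
		// and cases can run in parallel.
		t.Run(tc.input, func(t *testing.T) {
			t.Parallel()
			m, err := parser.ParseMountRaw(tc.input, "")
			assert.NilError(t, err)
			assert.Equal(t, m.Destination, tc.expected)
		})
	}
}
```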
**- Description for the changelog**
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-14 12:45:46+00:00 | 2021-07-26 17:24:53+00:00 | volume/mounts/lcow_parser.go | package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
// NewLCOWParser creates a parser with Linux Containers on Windows semantics.
func NewLCOWParser() Parser {
return &lcowParser{
windowsParser{
fi: defaultFileInfoProvider{},
},
}
}
// rxLCOWDestination is the regex expression for the mount destination for LCOW
//
// Destination (aka container path):
// - Variation on hostdir but can be a drive followed by colon as well
// - If a path, must be absolute. Can include spaces
// - Drive cannot be c: (explicitly checked in code, not RegEx)
const rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)`
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| thaJeztah | e9b07a730ef02df793a28d07eb8ee3c39bba92d8 | 9674540ccff358c3cd84cc2f33c3503e0dab7fb7 | This isn't new, so I'm not suggesting we change it now, but shouldn't this (and the friends it moved away from) be `regexp.MustCompile`'d? | tianon | 4,706 |
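For reference, the pre-compiled form suggested in the comment above would look roughly like this (sketch only; the variable name is illustrative, and the real code concatenates several pattern fragments before compiling):
```go
package mounts

import "regexp"

// Compile the destination pattern once at package initialization instead of
// passing the raw string around and re-compiling it on every parse/validate call.
var lcowDestinationRegexp = regexp.MustCompile(`^(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)$`)

// isValidLCOWDestination reports whether dest matches the LCOW destination syntax.
func isValidLCOWDestination(dest string) bool {
	return lcowDestinationRegexp.MatchString(dest)
}
```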
moby/moby | 42,520 | Remove LCOW (step 5): volumes/mounts: remove LCOW code (alternative) | relates to (part of) https://github.com/moby/moby/pull/42170
alternative for https://github.com/moby/moby/pull/42517
closes https://github.com/moby/moby/pull/42517
This alternative to https://github.com/moby/moby/pull/42517 preserves the LCOW parsing code for mounts (in case we end up supporting LCOW through containerd's LCOW implementation). By default, a native parser is now used, matching the host's (and thus the container's) platform.
After taking care of the above, tests have also been split per driver, and some refactoring was done to improve the tests (making it possible to run them in parallel).
Details below:
### volume/mounts: move some code to correct location, and minor linting/formatting
- Remove the windowsparser.HasResource() override, as it was the same on both
Windows and Linux
- Move the rxLCOWDestination to the lcowParser code
- Move the rwModes variable to a generic (non-platform-specific) file, as it's
used both for the windowsParser and the linuxParser
- Some minor formatting and linting changes
### volume/mounts: remove "containerOS" argument from NewParser (LCOW code)
This changes mounts.NewParser() to create a parser for the current operating system,
instead of one specific to a (possibly non-matching, in case of LCOW) OS.
With the OS-specific handling being removed, the "OS" parameter is also removed
from `daemon.verifyContainerSettings()`, and various other container-related
functions.
### Split tests per parser
Have tests per-parser, instead of grouping all tests in a single test (with a
test-set for each parser inside that test). All tests have been kept, at the
cost of some duplication in the test-cases that overlapped / were the same
for multiple parsers.
### Add constructors for each parser
This adds constructors for the Linux, Windows, and LCOW parsers, so that these
parsers can be used externally, instead of only through `NewParser()`.
### Don't use global variable for fileinfoprovider
The code used a global / package-level `currentFileInfoProvider` that was used
to stub the actual fileInfoProvider during tests. Using a package-level variable
for this could cause issues when trying to run tests in parallel, as well as
making the tests more complicated (tests had to reset the provider in a defer).
Now that we have a constructor for each driver, we can set up the fileinfoprovider
in those constructors, and stub the implementation for specific instances, instead
of globally.
### Use sub-tests, and use gotest.tools
These tests were running many test-cases, which required a lot of glue code to
construct useful errors if "one of those" failed.
Regular sub-tests are now used instead, to make debugging easier, and gotest.tools
takes care of producing useful errors.
**- Description for the changelog**
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-14 12:45:46+00:00 | 2021-07-26 17:24:53+00:00 | volume/mounts/lcow_parser.go | package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
// NewLCOWParser creates a parser with Linux Containers on Windows semantics.
func NewLCOWParser() Parser {
return &lcowParser{
windowsParser{
fi: defaultFileInfoProvider{},
},
}
}
// rxLCOWDestination is the regex expression for the mount destination for LCOW
//
// Destination (aka container path):
// - Variation on hostdir but can be a drive followed by colon as well
// - If a path, must be absolute. Can include spaces
// - Drive cannot be c: (explicitly checked in code, not RegEx)
const rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)`
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| thaJeztah | e9b07a730ef02df793a28d07eb8ee3c39bba92d8 | 9674540ccff358c3cd84cc2f33c3503e0dab7fb7 | Yes, probably makes sense to do so (help me remind in case I forget) | thaJeztah | 4,707 |
moby/moby | 42,520 | Remove LCOW (step 5): volumes/mounts: remove LCOW code (alternative) | relates to (part of) https://github.com/moby/moby/pull/42170
alternative for https://github.com/moby/moby/pull/42517
closes https://github.com/moby/moby/pull/42517
This alternative to https://github.com/moby/moby/pull/42517 preserves the LCOW parsing code for mounts (in case we end up supporting LCOW through containerd's LCOW implementation). By default, a native parser is now used, matching the host's (and thus the container's) platform.
After taking care of the above, tests have also been split per driver, and some refactoring was done to improve the tests (making it possible to run them in parallel).
Details below:
### volume/mounts: move some code to correct location, and minor linting/formatting
- Remove the windowsparser.HasResource() override, as it was the same on both
Windows and Linux
- Move the rxLCOWDestination to the lcowParser code
- Move the rwModes variable to a generic (non-platform-specific) file, as it's
used both for the windowsParser and the linuxParser
- Some minor formatting and linting changes
### volume/mounts: remove "containerOS" argument from NewParser (LCOW code)
This changes mounts.NewParser() to create a parser for the current operating system,
instead of one specific to a (possibly non-matching, in case of LCOW) OS.
With the OS-specific handling being removed, the "OS" parameter is also removed
from `daemon.verifyContainerSettings()`, and various other container-related
functions.
### Split tests per parser
Have tests per-parser, instead of grouping all tests in a single test (with a
test-set for each parser inside that test). All tests have been kept, at the
cost of some duplication in the test-cases that overlapped / were the same
for multiple parsers.
### Add constructors for each parser
This adds constructors for the Linux, Windows, and LCOW parsers, so that these
parsers can be used externally, instead of only through `NewParser()`.
### Don't use global variable for fileinfoprovider
The code used a global / package-level `currentFileInfoProvider` that was used
to stub the actual fileInfoProvider during tests. Using a package-level variable
for this could cause issues when trying to run tests in parallel, as well as
making the tests more complicated (tests had to reset the provider in a defer).
Now that we have a constructor for each driver, we can set up the fileinfoprovider
in those constructors, and stub the implementation for specific instances, instead
of globally.
### Use sub-tests, and use gotest.tools
These tests were running many test-cases, which required a lot of glue code to
construct useful errors if "one of those" failed.
Regular sub-tests are now used instead, to make debugging easier, and gotest.tools
takes care of producing useful errors.
**- Description for the changelog**
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-14 12:45:46+00:00 | 2021-07-26 17:24:53+00:00 | volume/mounts/lcow_parser.go | package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| package mounts // import "github.com/docker/docker/volume/mounts"
import (
"errors"
"path"
"github.com/docker/docker/api/types/mount"
)
// NewLCOWParser creates a parser with Linux Containers on Windows semantics.
func NewLCOWParser() Parser {
return &lcowParser{
windowsParser{
fi: defaultFileInfoProvider{},
},
}
}
// rxLCOWDestination is the regex expression for the mount destination for LCOW
//
// Destination (aka container path):
// - Variation on hostdir but can be a drive followed by colon as well
// - If a path, must be absolute. Can include spaces
// - Drive cannot be c: (explicitly checked in code, not RegEx)
const rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)`
var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
if path.Clean(m.Target) == "/" {
return ErrVolumeTargetIsRoot
}
if m.Type == mount.TypeNamedPipe {
return errors.New("Linux containers on Windows do not support named pipe mounts")
}
return nil
}
type lcowParser struct {
windowsParser
}
func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error {
return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
}
func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
}
| thaJeztah | e9b07a730ef02df793a28d07eb8ee3c39bba92d8 | 9674540ccff358c3cd84cc2f33c3503e0dab7fb7 | https://github.com/moby/moby/issues/42667 :smirk: :innocent: | tianon | 4,708 |
moby/moby | 42,514 | don't use buildmode=pie on ppc64 |
**- What I did**
made sure `buildmode=pie` is not passed on ppc64 (big-endian)
**- How I did it**
added an exception in `hack/make/.binary`.
It's already omitted for ppc64 in
`hack/dockerfile/install/install.sh`.
Not using a wildcard, because GOARCH=ppc64le supports PIE.
**- How to verify it**
apply provided patch and https://github.com/moby/moby/pull/42172
docker now builds and works on big-endian ppc64
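For an illustrative local check (the make target and paths are what I'd expect on a big-endian ppc64 host, not output taken from this PR):
```bash
# on a big-endian ppc64 host, with this patch and PR 42172 applied:
make binary
file bundles/binary-daemon/dockerd
# expected: an ELF 64-bit MSB executable for 64-bit PowerPC (built without -buildmode=pie)
```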
**- Description for the changelog**
don't pass buildmode=pie on ppc64be
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-11 00:26:47+00:00 | 2021-06-11 13:26:51+00:00 | hack/make/.binary | #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips and riscv64.
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv*) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips, riscv64 and ppc64be.
# https://github.com/golang/go/blob/master/src/cmd/internal/sys/supported.go#L89-L99
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv* | linux/ppc64) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| gyakovlev | a062d526f871addbd1ab5b418c693df2eacd3a8c | 7125e60b4579e7c642d8e2e9e7c053cb5892ad17 | Can you update the comment here? | samuelkarp | 4,709 |
moby/moby | 42,514 | don't use buildmode=pie on ppc64 |
**- What I did**
made sure `buildmode=pie` is not passed on ppc64 (big-endian)
**- How I did it**
added an exception in `hack/make/.binary`.
It's already omitted for ppc64 in
`hack/dockerfile/install/install.sh`.
Not using a wildcard, because GOARCH=ppc64le supports PIE.
**- How to verify it**
apply provided patch and https://github.com/moby/moby/pull/42172
docker now builds and works on big-endian ppc64
**- Description for the changelog**
don't pass buildmode=pie on ppc64be
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-11 00:26:47+00:00 | 2021-06-11 13:26:51+00:00 | hack/make/.binary | #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips and riscv64.
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv*) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips, riscv64 and ppc64be.
# https://github.com/golang/go/blob/master/src/cmd/internal/sys/supported.go#L89-L99
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv* | linux/ppc64) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| gyakovlev | a062d526f871addbd1ab5b418c693df2eacd3a8c | 7125e60b4579e7c642d8e2e9e7c053cb5892ad17 | That Go source link you shared would be a great candidate for this comment, IMO :eyes: | tianon | 4,710 |
moby/moby | 42,514 | don't use buildmode=pie on ppc64 |
**- What I did**
made sure `buildmode=pie` is not passed on ppc64 (big-endian)
**- How I did it**
added an exception in `hack/make/.binary`.
It's already omitted for ppc64 in
`hack/dockerfile/install/install.sh`.
Not using a wildcard, because GOARCH=ppc64le supports PIE.
**- How to verify it**
apply provided patch and https://github.com/moby/moby/pull/42172
docker now builds and works on big-endian ppc64
**- Description for the changelog**
don't pass buildmode=pie on ppc64be
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-11 00:26:47+00:00 | 2021-06-11 13:26:51+00:00 | hack/make/.binary | #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips and riscv64.
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv*) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| #!/usr/bin/env bash
set -e
# a helper to provide ".exe" when it's appropriate
binary_extension() {
if [ "$(go env GOOS)" = 'windows' ]; then
echo -n '.exe'
fi
}
BINARY_NAME="$BINARY_SHORT_NAME-$VERSION"
BINARY_EXTENSION="$(binary_extension)"
BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
source "${MAKEDIR}/.go-autogen"
hash_files() {
while [ $# -gt 0 ]; do
f="$1"
shift
dir="$(dirname "$f")"
base="$(basename "$f")"
for hashAlgo in md5 sha256; do
if command -v "${hashAlgo}sum" &> /dev/null; then
(
# subshell and cd so that we get output files like:
# $HASH docker-$VERSION
# instead of:
# $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION
cd "$dir"
"${hashAlgo}sum" "$base" > "$base.$hashAlgo"
)
fi
done
done
}
(
export GOGC=${DOCKER_BUILD_GOGC:-1000}
if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then
# must be cross-compiling!
case "$(go env GOOS)/$(go env GOARCH)" in
windows/amd64)
export CC="${CC:-x86_64-w64-mingw32-gcc}"
export CGO_ENABLED=1
;;
linux/arm)
case "${GOARM}" in
5 | "")
export CC="${CC:-arm-linux-gnueabi-gcc}"
export CGO_ENABLED=1
;;
7)
export CC="${CC:-arm-linux-gnueabihf-gcc}"
export CGO_ENABLED=1
;;
esac
;;
linux/arm64)
export CC="${CC:-aarch64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
linux/amd64)
export CC="${CC:-x86_64-linux-gnu-gcc}"
export CGO_ENABLED=1
;;
esac
fi
# -buildmode=pie is not supported on Windows and Linux on mips, riscv64 and ppc64be.
# https://github.com/golang/go/blob/master/src/cmd/internal/sys/supported.go#L89-L99
case "$(go env GOOS)/$(go env GOARCH)" in
windows/* | linux/mips* | linux/riscv* | linux/ppc64) ;;
*)
BUILDFLAGS+=("-buildmode=pie")
;;
esac
echo "Building: $DEST/$BINARY_FULLNAME"
echo "GOOS=\"${GOOS}\" GOARCH=\"${GOARCH}\" GOARM=\"${GOARM}\""
go build \
-o "$DEST/$BINARY_FULLNAME" \
"${BUILDFLAGS[@]}" \
-ldflags "
$LDFLAGS
$LDFLAGS_STATIC_DOCKER
$DOCKER_LDFLAGS
" \
${GO_PACKAGE}
)
echo "Created binary: $DEST/$BINARY_FULLNAME"
ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION"
hash_files "$DEST/$BINARY_FULLNAME"
| gyakovlev | a062d526f871addbd1ab5b418c693df2eacd3a8c | 7125e60b4579e7c642d8e2e9e7c053cb5892ad17 | applied both suggestions.
btw that go source file shows that `riscv64` supports PIE.
so `.binary` forces people who build docker on riscv64 on newer go versions to use non-PIE. something to keep an eye on.
I don't have `riscv64` HW to test/confirm/deny that though. | gyakovlev | 4,711 |
moby/moby | 42,504 | libnetwork/portallocator: various cleanups, remove init() | replaces https://github.com/moby/libnetwork/pull/2589
closes https://github.com/moby/libnetwork/pull/2589
- portallocator: use const for default port-ranges, instead of `init()`
- portallocator: minor refactor for readability
- portallocator: log instead of discard port-range failures
Both `getDynamicPortRange()` and `sanitizePortRange()` could produce an error, and the error message was previously discarded, silently falling back to using the default port range. This patch:
- Moves the fallback message from `getDynamicPortRange()` to `getDefaultPortRange()`, which is where the actual fallback occurs.
- Logs the fallback message and the error that causes the fallback.
The message/error is currently printed at the `INFO` level, but could be raised to a `WARN`, depending on what kind of situations can cause the error.
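A sketch of the resulting shape (illustrative only; the constants and the helper bodies below are placeholders, not the exact moby code):
```go
package portallocator

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

const (
	defaultPortRangeStart = 49153
	defaultPortRangeEnd   = 65535
)

// getDefaultPortRange is where the fallback actually happens, so that is
// where the failure gets logged instead of being silently discarded.
func getDefaultPortRange() (int, int) {
	start, end, err := getDynamicPortRange()
	if err == nil {
		start, end, err = sanitizePortRange(start, end)
	}
	if err != nil {
		logrus.Infof("falling back to default port range %d-%d: %v",
			defaultPortRangeStart, defaultPortRangeEnd, err)
		return defaultPortRangeStart, defaultPortRangeEnd
	}
	return start, end
}

func getDynamicPortRange() (int, int, error) {
	// On Linux this would read /proc/sys/net/ipv4/ip_local_port_range.
	return 0, 0, fmt.Errorf("port allocation range not configured")
}

func sanitizePortRange(start, end int) (int, int, error) {
	if start < 1 || end < start {
		return 0, 0, fmt.Errorf("invalid port range: %d-%d", start, end)
	}
	return start, end, nil
}
```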
| null | 2021-06-09 20:32:36+00:00 | 2021-06-12 21:10:10+00:00 | libnetwork/portallocator/portallocator_windows.go | package portallocator
func init() {
defaultPortRangeStart = 60000
defaultPortRangeEnd = 65000
}
func getDynamicPortRange() (start int, end int, err error) {
return defaultPortRangeStart, defaultPortRangeEnd, nil
}
| package portallocator
const (
// defaultPortRangeStart indicates the first port in port range
defaultPortRangeStart = 60000
// defaultPortRangeEnd indicates the last port in port range
defaultPortRangeEnd = 65000
)
func getDynamicPortRange() (start int, end int, err error) {
return defaultPortRangeStart, defaultPortRangeEnd, nil
}
| thaJeztah | 342dfb352129a9a0974d569973e4652dcf152b09 | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | This was really weird code; the default was set in a non-platform specific file, and then a (windows-only) `init()` to overwrite the defaults. | thaJeztah | 4,712 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers are built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a linux binary targeting arm 7.
See `hack/make/.binary` for supported cross compilation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
The Dockerfile contains the most up-to-date list of build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for compilation.
### `DOCKER_BUILDTAGS`
If you're building a binary that might be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers are built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* pigz (optional)
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you should check out `contrib/check-config.sh`.
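For example, to check a kernel config file (the path shown is just an example):
```bash
./contrib/check-config.sh /boot/config-$(uname -r)
```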
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists.
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, it is encouraged that additional configuration be placed in
`/etc/docker/daemon.json`.
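For illustration, such a file might look like the following (the keys shown are examples only; consult the dockerd documentation for the options your packaged version actually supports):
```json
{
  "storage-driver": "overlay2",
  "data-root": "/var/lib/docker",
  "log-level": "warn"
}
```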
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Do we still have the code for the AUTO_GOPATH, or was it removed?
I recall seeing the variable recently, but don't recall where 🤔 | thaJeztah | 4,713 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
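Each line in "vendor.conf" names an import path and the vendored revision, optionally followed by an alternate repository URL. The entries below only illustrate the shape of the file; the real packages and revisions are whatever the release you are packaging pins:
```
# import path                 revision (tag or commit)                  [alternate repo]
github.com/example/somelib    v1.2.3
github.com/example/otherlib   0123456789abcdef0123456789abcdef01234567  https://github.com/fork/otherlib.git
```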
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. For example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a Linux binary targeting ARMv7.
See `hack/make/.binary` for supported cross compilation platforms.
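Multiple targets can be combined in a single invocation; the platform list below is only an example and should be checked against `hack/make/.binary`:
```bash
# Example platform list: space-separated <GOOS>/<GOARCH>[/v<GOARM>] entries
make DOCKER_CROSSPLATFORMS='linux/arm/v6 linux/arm/v7' cross
```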
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
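One way to eyeball the last requirement above on a target system (on cgroup v1 systems, expect one mount per controller rather than a single all-encompassing mount):
```bash
# Lists the mounted cgroup hierarchies
grep cgroup /proc/mounts
```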
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
The Dockerfile contains the most up-to-date list of build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for compilation.
### `DOCKER_BUILDTAGS`
If you're building a binary that might be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* pigz (optional)
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you should check out `contrib/check-config.sh`.
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists.
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, it is encouraged that additional configuration be placed in
`/etc/docker/daemon.json`.
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Yeah, we do still have the code for it: https://github.com/moby/moby/blob/0ad2293d0e5bbf4c966a0e8b27c3ac3835265577/hack/make.sh#L67-L78
However, in Go 1.17+ we have to migrate completely off `GOPATH`, and I don't think many packagers will _actually_ be using that functionality, so I figured it was a good time to remove it here at least. | tianon | 4,714 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md |
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Oh, I don't think this is actually used anymore? Can someone else confirm too? :innocent: | tianon | 4,715 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers are built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by append `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a linux binary targeting arm 7.
See `hack/make/.binary` for supported cross compliation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
The Dockerfile contains the most up-to-date list of build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for compilation.
### `DOCKER_BUILDTAGS`
If you're building a binary that might be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers are built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* pigz (optional)
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you should check out `contrib/check-config.sh`.
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists.
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, it is encouraged that additional configuration be placed in
`/etc/docker/daemon.json`.
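For example, a package might ship or document a minimal configuration file such as the following sketch; the keys shown are only illustrative, and most daemon flags have an equivalent `daemon.json` key:
```bash
# Run as root. Keys are illustrative; see "dockerd --help" and the daemon
# documentation for the full set of configuration options.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<'EOF'
{
  "storage-driver": "overlay2",
  "log-level": "warn"
}
EOF
```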
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | It's not directly related, but I think this one might be worth reversing (`exclude_seccomp`) or removing completely now. | tianon | 4,716 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
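One rough way to sanity-check the C library dependencies on a build host is via pkg-config; the module names below are assumptions and may differ per distro:
```bash
# Both libraries normally ship pkg-config metadata in their -dev/-devel packages.
pkg-config --modversion libseccomp devmapper
```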
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
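Each non-comment line in "vendor.conf" is simply the import path, the pinned revision, and optionally an alternate repository URL, so a sketch like the following (standard text tools, nothing Docker-specific) lists the pinned versions:
```bash
# Print "<import path> <revision>" for every vendored dependency.
awk 'NF && $1 !~ /^#/ {print $1, $2}' vendor.conf
```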
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, separate them with spaces:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a Linux binary targeting ARMv7.
See `hack/make/.binary` for supported cross compilation platforms.
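To target several platforms in one invocation, list them space-separated; the platform list below is purely illustrative, so check `hack/make/.binary` for what is actually supported:
```bash
make DOCKER_CROSSPLATFORMS="linux/amd64 linux/arm64 windows/amd64" cross
```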
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
The Dockerfile contains the most up-to-date list of build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for compilation.
### `DOCKER_BUILDTAGS`
If you're building a binary that might be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, separate them with spaces:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* pigz (optional)
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you should check out `contrib/check-config.sh`.
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists.
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, it is encouraged that additional configuration be placed in
`/etc/docker/daemon.json`.
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Indeed, looks like https://github.com/moby/moby/pull/42254 was the PR which finally removed our last `+build apparmor`. | tianon | 4,717 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md |
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Reversing sounds good to me
Perhaps same for apparmor and selinux? I recall them being removed from runc as well | thaJeztah | 4,718 |
moby/moby | 42,486 | Remove a lot of outdated "project/PACKAGERS.md" text | This is just a first pass to axe some low hanging fruit. At this point, the project is generally mature enough that I do not believe most of this is necessary. | null | 2021-06-08 15:58:18+00:00 | 2021-06-09 06:23:10+00:00 | project/PACKAGERS.md |
| tianon | b0cef58b5e1153068ec1e86d29c292b201204edc | fb2c4b2cd6dc47b2d83ae8893bdbe7349484736b | Yeah, we don't actually have any `apparmor` or `selinux` build tags any more -- `seccomp` is the only of these variety that we have left (which is why after reviewing I ended up removing the `apparmor` bits from this document completely :smile:). I'm honestly thinking that perhaps we should now just completely remove the optional `seccomp` tag instead of reversing it with a similar justification. It no longer requires `cgo`, so anyone building a Docker binary for Linux should be including `seccomp`, especially as it's a critical bit of Docker's "secure by default" settings. | tianon | 4,719 |
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR in some chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is both confusing and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value.
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default (a short usage sketch of both values follows the commit list below).
- daemon: move custom seccomp profile warning from CLI to daemon side
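A rough sketch of how the two values could be exercised, assuming a daemon built with these changes; the restart command depends on your init system, and the alpine image plus the `/proc` status check are just one way to observe the effect:
```bash
# Daemon-wide default: run containers without a seccomp filter.
cat > /etc/docker/daemon.json <<'EOF'
{ "seccomp-profile": "unconfined" }
EOF
systemctl restart docker
# Shows "Seccomp: 0" (disabled) for a plain container...
docker run --rm alpine grep Seccomp /proc/1/status
# ...while "builtin" opts a single container back in to the embedded default profile.
docker run --rm --security-opt seccomp=builtin alpine grep Seccomp /proc/1/status
```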
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | cmd/dockerd/config_unix.go | // +build linux freebsd
package main
import (
"os/exec"
"github.com/containerd/cgroups"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/opts"
"github.com/docker/docker/rootless"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
// First handle install flags which are consistent cross-platform
if err := installCommonConfigFlags(conf, flags); err != nil {
return err
}
// Then install flags common to unix platforms
installUnixConfigFlags(conf, flags)
conf.Ulimits = make(map[string]*units.Ulimit)
conf.NetworkConfig.DefaultAddressPools = opts.PoolsOpt{}
// Set default value for `--default-shm-size`
conf.ShmSize = opts.MemBytes(config.DefaultShmSize)
// Then platform-specific install flags
flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support")
flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), "default-ulimit", "Default ulimits for containers")
flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules")
flags.BoolVar(&conf.BridgeConfig.EnableIP6Tables, "ip6tables", false, "Enable addition of ip6tables rules")
flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward")
flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading")
flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking")
flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
defaultUserlandProxyPath := ""
if rootless.RunningWithRootlessKit() {
var err error
// use rootlesskit-docker-proxy for exposing the ports in RootlessKit netns to the initial namespace.
defaultUserlandProxyPath, err = exec.LookPath(rootless.RootlessKitDockerProxyBinary)
if err != nil {
return errors.Wrapf(err, "running with RootlessKit, but %s not installed", rootless.RootlessKitDockerProxyBinary)
}
}
flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", defaultUserlandProxyPath, "Path to the userland proxy binary")
flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", 0, "Set the oom_score_adj for the daemon")
flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary")
flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds for the parent cgroup for all containers")
flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds for the parent cgroup for all containers")
flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile")
flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers")
flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
flags.Var(&conf.NetworkConfig.DefaultAddressPools, "default-address-pool", "Default address pools for node specific local networks")
// rootless needs to be explicitly specified for running "rootful" dockerd in rootless dockerd (#38702)
// Note that defaultUserlandProxyPath and honorXDG are configured according to the value of rootless.RunningWithRootlessKit, not the value of --rootless.
flags.BoolVar(&conf.Rootless, "rootless", rootless.RunningWithRootlessKit(), "Enable rootless mode; typically used with RootlessKit")
defaultCgroupNamespaceMode := "host"
if cgroups.Mode() == cgroups.Unified {
defaultCgroupNamespaceMode = "private"
}
flags.StringVar(&conf.CgroupNamespaceMode, "default-cgroupns-mode", defaultCgroupNamespaceMode, `Default mode for containers cgroup namespace ("host" | "private")`)
return nil
}
| // +build linux freebsd
package main
import (
"os/exec"
"github.com/containerd/cgroups"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/opts"
"github.com/docker/docker/rootless"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
// First handle install flags which are consistent cross-platform
if err := installCommonConfigFlags(conf, flags); err != nil {
return err
}
// Then install flags common to unix platforms
installUnixConfigFlags(conf, flags)
conf.Ulimits = make(map[string]*units.Ulimit)
conf.NetworkConfig.DefaultAddressPools = opts.PoolsOpt{}
// Set default value for `--default-shm-size`
conf.ShmSize = opts.MemBytes(config.DefaultShmSize)
// Then platform-specific install flags
flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support")
flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), "default-ulimit", "Default ulimits for containers")
flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules")
flags.BoolVar(&conf.BridgeConfig.EnableIP6Tables, "ip6tables", false, "Enable addition of ip6tables rules")
flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward")
flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading")
flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking")
flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
defaultUserlandProxyPath := ""
if rootless.RunningWithRootlessKit() {
var err error
// use rootlesskit-docker-proxy for exposing the ports in RootlessKit netns to the initial namespace.
defaultUserlandProxyPath, err = exec.LookPath(rootless.RootlessKitDockerProxyBinary)
if err != nil {
return errors.Wrapf(err, "running with RootlessKit, but %s not installed", rootless.RootlessKitDockerProxyBinary)
}
}
flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", defaultUserlandProxyPath, "Path to the userland proxy binary")
flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", 0, "Set the oom_score_adj for the daemon")
flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary")
flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds for the parent cgroup for all containers")
flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds for the parent cgroup for all containers")
flags.StringVar(&conf.SeccompProfile, "seccomp-profile", config.SeccompProfileDefault, `Path to seccomp profile. Use "unconfined" to disable the default seccomp profile`)
flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers")
flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
flags.Var(&conf.NetworkConfig.DefaultAddressPools, "default-address-pool", "Default address pools for node specific local networks")
// rootless needs to be explicitly specified for running "rootful" dockerd in rootless dockerd (#38702)
// Note that defaultUserlandProxyPath and honorXDG are configured according to the value of rootless.RunningWithRootlessKit, not the value of --rootless.
flags.BoolVar(&conf.Rootless, "rootless", rootless.RunningWithRootlessKit(), "Enable rootless mode; typically used with RootlessKit")
defaultCgroupNamespaceMode := "host"
if cgroups.Mode() == cgroups.Unified {
defaultCgroupNamespaceMode = "private"
}
flags.StringVar(&conf.CgroupNamespaceMode, "default-cgroupns-mode", defaultCgroupNamespaceMode, `Default mode for containers cgroup namespace ("host" | "private")`)
return nil
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | Should this default to `config.SeccompProfileDefault` so that the effective default value shows up in the help output? | tianon | 4,720 |
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | I was a bit in doubt; I think for most options we consider `""` (default string value) to also be `default`. The ambiguity is that `docker info` will show `default` (if nothing is set), so yes, from that perspective perhaps it makes sense to use it here as well. 😅 | thaJeztah | 4,721
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
return &Config{
CommonConfig: CommonConfig{
LogConfig: LogConfig{
Config: make(map[string]string),
},
ClusterOpts: make(map[string]string),
},
}
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
// SeccompProfileDefault is the built-in default seccomp profile.
SeccompProfileDefault = "builtin"
// SeccompProfileUnconfined is a special profile name for seccomp to use an
// "unconfined" seccomp profile.
SeccompProfileUnconfined = "unconfined"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
return &Config{
CommonConfig: CommonConfig{
LogConfig: LogConfig{
Config: make(map[string]string),
},
ClusterOpts: make(map[string]string),
},
}
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
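		// Nested option groups are flattened one level so their keys can be
		// matched against flag names; keys listed in flatOptions are kept
		// intact as single values.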
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | I think this naming choice is probably a little bit ambiguous -- if I set the daemon-wide `--seccomp-profile` to something other than the built-in profile, does `--security-opt seccomp=default` get me the daemon-wide default, or the Docker built-in profile? (Maybe `builtin` or `docker` are better name options if it's the latter?) | tianon | 4,722 |
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR in some chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is both confusing, and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value (a rough sketch of the intended resolution logic follows after this list of changes).
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default.
- daemon: move custom seccomp profile warning from CLI to daemon side
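To make the behaviour described above concrete, here is a minimal, self-contained sketch of the value resolution; the constant and function names are hypothetical and this is not the daemon's actual implementation:

```go
package main

import (
	"fmt"
	"os"
)

// Hypothetical constants for illustration only; the daemon defines its own.
const (
	profileBuiltin    = "builtin"
	profileUnconfined = "unconfined"
)

// resolveSeccompProfile sketches the rule described above: an empty value or
// "builtin" selects the embedded default profile, "unconfined" disables
// seccomp, and anything else is treated as a path to a custom profile file.
func resolveSeccompProfile(value string) (string, error) {
	switch value {
	case "", profileBuiltin:
		return profileBuiltin, nil
	case profileUnconfined:
		return profileUnconfined, nil
	default:
		if _, err := os.Stat(value); err != nil {
			return "", fmt.Errorf("seccomp profile %q could not be read: %w", value, err)
		}
		return value, nil
	}
}

func main() {
	for _, v := range []string{"", "unconfined", "builtin", "/etc/docker/custom-seccomp.json"} {
		p, err := resolveSeccompProfile(v)
		fmt.Printf("%-35q -> %q (err: %v)\n", v, p, err)
	}
}
```

Under a rule like this, `--seccomp-profile=unconfined` and `--seccomp-profile=builtin` line up with the per-container `--security-opt seccomp=...` values, while any other string keeps its current meaning of a path to a profile file.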
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | daemon/info.go | package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
profile := daemon.seccompProfilePath
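		// An empty path means no custom profile was configured, so the daemon
		// is using its embedded default profile; report it as "default".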
if profile == "" {
profile = "default"
}
securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile))
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
		// cfg.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
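		// Surface a warning when the daemon was started with a seccomp profile
		// other than the built-in default (for example "unconfined" or a
		// custom profile file).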
if daemon.seccompProfilePath != config.SeccompProfileDefault {
v.Warnings = append(v.Warnings, "WARNING: daemon is not using the default seccomp profile")
}
securityOptions = append(securityOptions, "name=seccomp,profile="+daemon.seccompProfilePath)
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
		// cfg.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | Should this be normalized in the option parsing instead of being normalized in the info output? | tianon | 4,723 |
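The question above (whether the special profile values should be normalized while parsing the daemon options rather than in the info output) can be illustrated with a small, self-contained sketch; the names below are hypothetical and are not the daemon's actual code:

```go
package main

import "fmt"

// Hypothetical constant name, for illustration only.
const seccompProfileDefault = "builtin"

// normalizeSeccompProfile would run once while the daemon options are being
// parsed, so every later consumer (including the info endpoint) can rely on a
// single canonical value instead of treating "" as a special case.
func normalizeSeccompProfile(value string) string {
	if value == "" {
		return seccompProfileDefault
	}
	return value
}

func main() {
	profile := normalizeSeccompProfile("") // nothing configured by the user
	// Consumers now need a single comparison instead of `!= "" && != default`.
	if profile != seccompProfileDefault {
		fmt.Println("WARNING: daemon is not using the default seccomp profile")
	}
	fmt.Println("name=seccomp,profile=" + profile)
}
```

The same idea also addresses the follow-up remark further down about reducing the `... != "" && ... != Default` dual comparisons.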
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR in some chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is both confusing, and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value.
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default.
- daemon: move custom seccomp profile warning from CLI to daemon side
**- What I did**
**- How I did it**
**- How to verify it**
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | daemon/info.go | package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
profile := daemon.seccompProfilePath
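		// An empty path means no custom profile was configured, so the daemon
		// is using its embedded default profile; report it as "default".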
if profile == "" {
profile = "default"
}
securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile))
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
		// cfg.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
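		// Surface a warning when the daemon was started with a seccomp profile
		// other than the built-in default (for example "unconfined" or a
		// custom profile file).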
if daemon.seccompProfilePath != config.SeccompProfileDefault {
v.Warnings = append(v.Warnings, "WARNING: daemon is not using the default seccomp profile")
}
securityOptions = append(securityOptions, "name=seccomp,profile="+daemon.seccompProfilePath)
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
		// cfg.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | (That would also cut down on the number of `... != "" && ... != Default` dual comparisons.) | tianon | 4,724 |
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR in some chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is both confusing, and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value.
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default.
- daemon: move custom seccomp profile warning from CLI to daemon side
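As a hedged sketch of the value handling described above (the validation code itself is not quoted in this diff, and the helper name here is made up): the two special names are accepted as-is, while anything else keeps its existing meaning of a path to a custom profile JSON file.

```go
// Sketch only, assuming the constants this PR introduces
// (config.SeccompProfileDefault and config.SeccompProfileUnconfined).
func isSpecialSeccompValue(profile string) bool {
	switch profile {
	case config.SeccompProfileDefault, config.SeccompProfileUnconfined:
		return true // embedded default ("builtin") or run unconfined
	default:
		return false // treated as a path to a profile JSON file
	}
}
```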
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | daemon/info.go | package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
profile := daemon.seccompProfilePath
if profile == "" {
profile = "default"
}
securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile))
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
// cnf.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| package daemon // import "github.com/docker/docker/daemon"
import (
"fmt"
"net/url"
"os"
"runtime"
"strings"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/parsers/operatingsystem"
"github.com/docker/docker/pkg/platform"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/registry"
metrics "github.com/docker/go-metrics"
"github.com/opencontainers/selinux/go-selinux"
"github.com/sirupsen/logrus"
)
// SystemInfo returns information about the host server the daemon is running on.
func (daemon *Daemon) SystemInfo() *types.Info {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_info"))()
sysInfo := daemon.RawSysInfo()
cRunning, cPaused, cStopped := stateCtr.get()
v := &types.Info{
ID: daemon.ID,
Containers: cRunning + cPaused + cStopped,
ContainersRunning: cRunning,
ContainersPaused: cPaused,
ContainersStopped: cStopped,
Images: daemon.imageService.CountImages(),
IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled,
BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled,
BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled,
Debug: debug.IsEnabled(),
Name: hostName(),
NFd: fileutils.GetTotalUsedFds(),
NGoroutines: runtime.NumGoroutine(),
SystemTime: time.Now().Format(time.RFC3339Nano),
LoggingDriver: daemon.defaultLogConfig.Type,
NEventsListener: daemon.EventsService.SubscribersCount(),
KernelVersion: kernelVersion(),
OperatingSystem: operatingSystem(),
OSVersion: osVersion(),
IndexServerAddress: registry.IndexServer,
OSType: platform.OSType,
Architecture: platform.Architecture,
RegistryConfig: daemon.RegistryService.ServiceConfig(),
NCPU: sysinfo.NumCPU(),
MemTotal: memInfo().MemTotal,
GenericResources: daemon.genericResources,
DockerRootDir: daemon.configStore.Root,
Labels: daemon.configStore.Labels,
ExperimentalBuild: daemon.configStore.Experimental,
ServerVersion: dockerversion.Version,
HTTPProxy: maskCredentials(getEnvAny("HTTP_PROXY", "http_proxy")),
HTTPSProxy: maskCredentials(getEnvAny("HTTPS_PROXY", "https_proxy")),
NoProxy: getEnvAny("NO_PROXY", "no_proxy"),
LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled,
Isolation: daemon.defaultIsolation,
}
daemon.fillClusterInfo(v)
daemon.fillAPIInfo(v)
// Retrieve platform specific info
daemon.fillPlatformInfo(v, sysInfo)
daemon.fillDriverInfo(v)
daemon.fillPluginsInfo(v)
daemon.fillSecurityOptions(v, sysInfo)
daemon.fillLicense(v)
daemon.fillDefaultAddressPools(v)
if v.DefaultRuntime == config.LinuxV1RuntimeName {
v.Warnings = append(v.Warnings, fmt.Sprintf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return v
}
// SystemVersion returns version information about the daemon.
func (daemon *Daemon) SystemVersion() types.Version {
defer metrics.StartTimer(hostInfoFunctions.WithValues("system_version"))()
kernelVersion := kernelVersion()
v := types.Version{
Components: []types.ComponentVersion{
{
Name: "Engine",
Version: dockerversion.Version,
Details: map[string]string{
"GitCommit": dockerversion.GitCommit,
"ApiVersion": api.DefaultVersion,
"MinAPIVersion": api.MinVersion,
"GoVersion": runtime.Version(),
"Os": runtime.GOOS,
"Arch": runtime.GOARCH,
"BuildTime": dockerversion.BuildTime,
"KernelVersion": kernelVersion,
"Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental),
},
},
},
// Populate deprecated fields for older clients
Version: dockerversion.Version,
GitCommit: dockerversion.GitCommit,
APIVersion: api.DefaultVersion,
MinAPIVersion: api.MinVersion,
GoVersion: runtime.Version(),
Os: runtime.GOOS,
Arch: runtime.GOARCH,
BuildTime: dockerversion.BuildTime,
KernelVersion: kernelVersion,
Experimental: daemon.configStore.Experimental,
}
v.Platform.Name = dockerversion.PlatformName
daemon.fillPlatformVersion(&v)
return v
}
func (daemon *Daemon) fillClusterInfo(v *types.Info) {
v.ClusterAdvertise = daemon.configStore.ClusterAdvertise
v.ClusterStore = daemon.configStore.ClusterStore
if v.ClusterAdvertise != "" || v.ClusterStore != "" {
v.Warnings = append(v.Warnings, `WARNING: node discovery and overlay networks with an external k/v store (cluster-advertise,
cluster-store, cluster-store-opt) are deprecated and will be removed in a future release.`)
}
}
func (daemon *Daemon) fillDriverInfo(v *types.Info) {
switch daemon.graphDriver {
case "aufs", "devicemapper", "overlay":
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: the %s storage-driver is deprecated, and will be removed in a future release.", daemon.graphDriver))
}
v.Driver = daemon.graphDriver
v.DriverStatus = daemon.imageService.LayerStoreStatus()
fillDriverWarnings(v)
}
func (daemon *Daemon) fillPluginsInfo(v *types.Info) {
v.Plugins = types.PluginsInfo{
Volume: daemon.volumes.GetDriverList(),
Network: daemon.GetNetworkDriverList(),
// The authorization plugins are returned in the order they are
// used as they constitute a request/response modification chain.
Authorization: daemon.configStore.AuthorizationPlugins,
Log: logger.ListDrivers(),
}
}
func (daemon *Daemon) fillSecurityOptions(v *types.Info, sysInfo *sysinfo.SysInfo) {
var securityOptions []string
if sysInfo.AppArmor {
securityOptions = append(securityOptions, "name=apparmor")
}
if sysInfo.Seccomp && supportsSeccomp {
if daemon.seccompProfilePath != config.SeccompProfileDefault {
v.Warnings = append(v.Warnings, "WARNING: daemon is not using the default seccomp profile")
}
securityOptions = append(securityOptions, "name=seccomp,profile="+daemon.seccompProfilePath)
}
if selinux.GetEnabled() {
securityOptions = append(securityOptions, "name=selinux")
}
if rootIDs := daemon.idMapping.RootPair(); rootIDs.UID != 0 || rootIDs.GID != 0 {
securityOptions = append(securityOptions, "name=userns")
}
if daemon.Rootless() {
securityOptions = append(securityOptions, "name=rootless")
}
if daemon.cgroupNamespacesEnabled(sysInfo) {
securityOptions = append(securityOptions, "name=cgroupns")
}
v.SecurityOptions = securityOptions
}
func (daemon *Daemon) fillAPIInfo(v *types.Info) {
const warn string = `
Access to the remote API is equivalent to root access on the host. Refer
to the 'Docker daemon attack surface' section in the documentation for
more information: https://docs.docker.com/go/attack-surface/`
cfg := daemon.configStore
for _, host := range cfg.Hosts {
// cnf.Hosts is normalized during startup, so should always have a scheme/proto
h := strings.SplitN(host, "://", 2)
proto := h[0]
addr := h[1]
if proto != "tcp" {
continue
}
if cfg.TLS == nil || !*cfg.TLS {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on http://%s without encryption.%s", addr, warn))
continue
}
if cfg.TLSVerify == nil || !*cfg.TLSVerify {
v.Warnings = append(v.Warnings, fmt.Sprintf("WARNING: API is accessible on https://%s without TLS client verification.%s", addr, warn))
continue
}
}
}
func (daemon *Daemon) fillDefaultAddressPools(v *types.Info) {
for _, pool := range daemon.configStore.DefaultAddressPools.Value() {
v.DefaultAddressPools = append(v.DefaultAddressPools, types.NetworkAddressPool{
Base: pool.Base,
Size: pool.Size,
})
}
}
func hostName() string {
hostname := ""
if hn, err := os.Hostname(); err != nil {
logrus.Warnf("Could not get hostname: %v", err)
} else {
hostname = hn
}
return hostname
}
func kernelVersion() string {
var kernelVersion string
if kv, err := kernel.GetKernelVersion(); err != nil {
logrus.Warnf("Could not get kernel version: %v", err)
} else {
kernelVersion = kv.String()
}
return kernelVersion
}
func memInfo() *system.MemInfo {
memInfo, err := system.ReadMemInfo()
if err != nil {
logrus.Errorf("Could not read system memory info: %v", err)
memInfo = &system.MemInfo{}
}
return memInfo
}
func operatingSystem() (operatingSystem string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("operating_system"))()
if s, err := operatingsystem.GetOperatingSystem(); err != nil {
logrus.Warnf("Could not get operating system name: %v", err)
} else {
operatingSystem = s
}
// Don't do containerized check on Windows
if runtime.GOOS != "windows" {
if inContainer, err := operatingsystem.IsContainerized(); err != nil {
logrus.Errorf("Could not determine if daemon is containerized: %v", err)
operatingSystem += " (error determining if containerized)"
} else if inContainer {
operatingSystem += " (containerized)"
}
}
return operatingSystem
}
func osVersion() (version string) {
defer metrics.StartTimer(hostInfoFunctions.WithValues("os_version"))()
version, err := operatingsystem.GetOperatingSystemVersion()
if err != nil {
logrus.Warnf("Could not get operating system version: %v", err)
}
return version
}
func maskCredentials(rawURL string) string {
parsedURL, err := url.Parse(rawURL)
if err != nil || parsedURL.User == nil {
return rawURL
}
parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
maskedURL := parsedURL.String()
return maskedURL
}
func getEnvAny(names ...string) string {
for _, n := range names {
if val := os.Getenv(n); val != "" {
return val
}
}
return ""
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | Added a commit that moves this logic to `setupSeccompProfile()` | thaJeztah | 4,725 |
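For context, a hedged sketch of what a `setupSeccompProfile()` along those lines might look like; the actual implementation is not part of this diff. `daemon.seccompProfilePath` and `config.SeccompProfileDefault` appear in the change above, while the `configStore.SeccompProfile` field name is assumed here.

```go
// Sketch only: resolve the configured value once during daemon startup, so
// fillSecurityOptions can rely on seccompProfilePath always being populated.
func (daemon *Daemon) setupSeccompProfile() error {
	profile := daemon.configStore.SeccompProfile // field name assumed
	if profile == "" {
		profile = config.SeccompProfileDefault
	}
	daemon.seccompProfilePath = profile
	return nil
}
```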
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR into smaller chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is confusing and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value.
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default.
- daemon: move custom seccomp profile warning from CLI to daemon side
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | integration/daemon/daemon_test.go | package daemon // import "github.com/docker/docker/integration/daemon"
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/docker/docker/testutil/daemon"
"gotest.tools/v3/assert"
"gotest.tools/v3/skip"
is "gotest.tools/v3/assert/cmp"
)
func TestConfigDaemonLibtrustID(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
trustKey := filepath.Join(d.RootDir(), "key.json")
err := ioutil.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644)
assert.NilError(t, err)
config := filepath.Join(d.RootDir(), "daemon.json")
err = ioutil.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", config)
info := d.Info(t)
assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB")
}
func TestDaemonConfigValidation(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
dockerBinary, err := d.BinaryPath()
assert.NilError(t, err)
params := []string{"--validate", "--config-file"}
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata")
const (
validOut = "configuration OK"
failedOut = "unable to configure the Docker daemon with file"
)
tests := []struct {
name string
args []string
expectedOut string
}{
{
name: "config with no content",
args: append(params, filepath.Join(testdata, "empty-config-1.json")),
expectedOut: validOut,
},
{
name: "config with {}",
args: append(params, filepath.Join(testdata, "empty-config-2.json")),
expectedOut: validOut,
},
{
name: "invalid config",
args: append(params, filepath.Join(testdata, "invalid-config-1.json")),
expectedOut: failedOut,
},
{
name: "malformed config",
args: append(params, filepath.Join(testdata, "malformed-config.json")),
expectedOut: failedOut,
},
{
name: "valid config",
args: append(params, filepath.Join(testdata, "valid-config-1.json")),
expectedOut: validOut,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
cmd := exec.Command(dockerBinary, tc.args...)
out, err := cmd.CombinedOutput()
assert.Check(t, is.Contains(string(out), tc.expectedOut))
if tc.expectedOut == failedOut {
assert.ErrorContains(t, err, "", "expected an error, but got none")
} else {
assert.NilError(t, err)
}
})
}
}
| package daemon // import "github.com/docker/docker/integration/daemon"
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/testutil/daemon"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/skip"
)
func TestConfigDaemonLibtrustID(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
trustKey := filepath.Join(d.RootDir(), "key.json")
err := ioutil.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644)
assert.NilError(t, err)
config := filepath.Join(d.RootDir(), "daemon.json")
err = ioutil.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", config)
info := d.Info(t)
assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB")
}
func TestDaemonConfigValidation(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
dockerBinary, err := d.BinaryPath()
assert.NilError(t, err)
params := []string{"--validate", "--config-file"}
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata")
const (
validOut = "configuration OK"
failedOut = "unable to configure the Docker daemon with file"
)
tests := []struct {
name string
args []string
expectedOut string
}{
{
name: "config with no content",
args: append(params, filepath.Join(testdata, "empty-config-1.json")),
expectedOut: validOut,
},
{
name: "config with {}",
args: append(params, filepath.Join(testdata, "empty-config-2.json")),
expectedOut: validOut,
},
{
name: "invalid config",
args: append(params, filepath.Join(testdata, "invalid-config-1.json")),
expectedOut: failedOut,
},
{
name: "malformed config",
args: append(params, filepath.Join(testdata, "malformed-config.json")),
expectedOut: failedOut,
},
{
name: "valid config",
args: append(params, filepath.Join(testdata, "valid-config-1.json")),
expectedOut: validOut,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
cmd := exec.Command(dockerBinary, tc.args...)
out, err := cmd.CombinedOutput()
assert.Check(t, is.Contains(string(out), tc.expectedOut))
if tc.expectedOut == failedOut {
assert.ErrorContains(t, err, "", "expected an error, but got none")
} else {
assert.NilError(t, err)
}
})
}
}
func TestConfigDaemonSeccompProfiles(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
tests := []struct {
doc string
profile string
expectedProfile string
}{
{
doc: "empty profile set",
profile: "",
expectedProfile: config.SeccompProfileDefault,
},
{
doc: "default profile",
profile: config.SeccompProfileDefault,
expectedProfile: config.SeccompProfileDefault,
},
{
doc: "unconfined profile",
profile: config.SeccompProfileUnconfined,
expectedProfile: config.SeccompProfileUnconfined,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
d.Start(t, "--seccomp-profile="+tc.profile)
info := d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
cfg := filepath.Join(d.RootDir(), "daemon.json")
err := ioutil.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", cfg)
info = d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
})
}
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | Should we test this last case _before_ we write `daemon.json` to make sure it's testing the flag all by itself? ala:
```suggestion
d.Start(t, "--seccomp-profile", tc.profile)
info = d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
cfg := filepath.Join(d.RootDir(), "daemon.json")
err := ioutil.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", cfg)
info := d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
``` | tianon | 4,726 |
moby/moby | 42,481 | Fix daemon.json and daemon --seccomp-profile not accepting "unconfined", and rename default profile to "builtin" | waiting for https://github.com/moby/moby/pull/42393, then I can add some tests.
Possibly will be splitting this PR into smaller chunks as well, in case we want to backport the "unconfined" daemon configuration fix.
- Add const for "unconfined" and "default" seccomp profiles
- Fix daemon.json and daemon --seccomp-profile not accepting "unconfined"
Commit b237189e6c8a4f97be59f08c63cdcb1f2f4680a8 implemented an option to
set the default seccomp profile in the daemon configuration. When that PR
was reviewed, it was discussed to have the option accept the path to a custom
profile JSON file; https://github.com/moby/moby/pull/26276#issuecomment-253546966
However, in the implementation, the special "unconfined" value was not taken into
account. The "unconfined" value is meant to disable seccomp (more factually:
run with an empty profile).
While it's likely possible to achieve this by creating a file with an empty
(`{}`) profile, and passing the path to that file, it's inconsistent with the
`--security-opt seccomp=unconfined` option on `docker run` and `docker create`,
which is confusing and makes it harder to use (especially on Docker Desktop,
where there's no direct access to the VM's filesystem).
This patch adds the missing check for the special "unconfined" value.
- daemon: allow "builtin" as valid value for seccomp profiles
This allows containers to use the embedded default profile if a different
default is set (e.g. "unconfined") in the daemon configuration. Without this
option, users would have to copy the default profile to a file in order to
use the default.
- daemon: move custom seccomp profile warning from CLI to daemon side
| null | 2021-06-07 16:45:45+00:00 | 2021-08-11 17:02:06+00:00 | integration/daemon/daemon_test.go | package daemon // import "github.com/docker/docker/integration/daemon"
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/docker/docker/testutil/daemon"
"gotest.tools/v3/assert"
"gotest.tools/v3/skip"
is "gotest.tools/v3/assert/cmp"
)
func TestConfigDaemonLibtrustID(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
trustKey := filepath.Join(d.RootDir(), "key.json")
err := ioutil.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644)
assert.NilError(t, err)
config := filepath.Join(d.RootDir(), "daemon.json")
err = ioutil.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", config)
info := d.Info(t)
assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB")
}
func TestDaemonConfigValidation(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
dockerBinary, err := d.BinaryPath()
assert.NilError(t, err)
params := []string{"--validate", "--config-file"}
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata")
const (
validOut = "configuration OK"
failedOut = "unable to configure the Docker daemon with file"
)
tests := []struct {
name string
args []string
expectedOut string
}{
{
name: "config with no content",
args: append(params, filepath.Join(testdata, "empty-config-1.json")),
expectedOut: validOut,
},
{
name: "config with {}",
args: append(params, filepath.Join(testdata, "empty-config-2.json")),
expectedOut: validOut,
},
{
name: "invalid config",
args: append(params, filepath.Join(testdata, "invalid-config-1.json")),
expectedOut: failedOut,
},
{
name: "malformed config",
args: append(params, filepath.Join(testdata, "malformed-config.json")),
expectedOut: failedOut,
},
{
name: "valid config",
args: append(params, filepath.Join(testdata, "valid-config-1.json")),
expectedOut: validOut,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
cmd := exec.Command(dockerBinary, tc.args...)
out, err := cmd.CombinedOutput()
assert.Check(t, is.Contains(string(out), tc.expectedOut))
if tc.expectedOut == failedOut {
assert.ErrorContains(t, err, "", "expected an error, but got none")
} else {
assert.NilError(t, err)
}
})
}
}
| package daemon // import "github.com/docker/docker/integration/daemon"
import (
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/testutil/daemon"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
"gotest.tools/v3/skip"
)
func TestConfigDaemonLibtrustID(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
trustKey := filepath.Join(d.RootDir(), "key.json")
err := ioutil.WriteFile(trustKey, []byte(`{"crv":"P-256","d":"dm28PH4Z4EbyUN8L0bPonAciAQa1QJmmyYd876mnypY","kid":"WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB","kty":"EC","x":"Mh5-JINSjaa_EZdXDttri255Z5fbCEOTQIZjAcScFTk","y":"eUyuAjfxevb07hCCpvi4Zi334Dy4GDWQvEToGEX4exQ"}`), 0644)
assert.NilError(t, err)
config := filepath.Join(d.RootDir(), "daemon.json")
err = ioutil.WriteFile(config, []byte(`{"deprecated-key-path": "`+trustKey+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", config)
info := d.Info(t)
assert.Equal(t, info.ID, "WTJ3:YSIP:CE2E:G6KJ:PSBD:YX2Y:WEYD:M64G:NU2V:XPZV:H2CR:VLUB")
}
func TestDaemonConfigValidation(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
dockerBinary, err := d.BinaryPath()
assert.NilError(t, err)
params := []string{"--validate", "--config-file"}
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
testdata := filepath.Join(dest, "..", "..", "integration", "daemon", "testdata")
const (
validOut = "configuration OK"
failedOut = "unable to configure the Docker daemon with file"
)
tests := []struct {
name string
args []string
expectedOut string
}{
{
name: "config with no content",
args: append(params, filepath.Join(testdata, "empty-config-1.json")),
expectedOut: validOut,
},
{
name: "config with {}",
args: append(params, filepath.Join(testdata, "empty-config-2.json")),
expectedOut: validOut,
},
{
name: "invalid config",
args: append(params, filepath.Join(testdata, "invalid-config-1.json")),
expectedOut: failedOut,
},
{
name: "malformed config",
args: append(params, filepath.Join(testdata, "malformed-config.json")),
expectedOut: failedOut,
},
{
name: "valid config",
args: append(params, filepath.Join(testdata, "valid-config-1.json")),
expectedOut: validOut,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
cmd := exec.Command(dockerBinary, tc.args...)
out, err := cmd.CombinedOutput()
assert.Check(t, is.Contains(string(out), tc.expectedOut))
if tc.expectedOut == failedOut {
assert.ErrorContains(t, err, "", "expected an error, but got none")
} else {
assert.NilError(t, err)
}
})
}
}
func TestConfigDaemonSeccompProfiles(t *testing.T) {
skip.If(t, runtime.GOOS != "linux")
d := daemon.New(t)
defer d.Stop(t)
tests := []struct {
doc string
profile string
expectedProfile string
}{
{
doc: "empty profile set",
profile: "",
expectedProfile: config.SeccompProfileDefault,
},
{
doc: "default profile",
profile: config.SeccompProfileDefault,
expectedProfile: config.SeccompProfileDefault,
},
{
doc: "unconfined profile",
profile: config.SeccompProfileUnconfined,
expectedProfile: config.SeccompProfileUnconfined,
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.doc, func(t *testing.T) {
d.Start(t, "--seccomp-profile="+tc.profile)
info := d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
cfg := filepath.Join(d.RootDir(), "daemon.json")
err := ioutil.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", cfg)
info = d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
})
}
}
| thaJeztah | 343665850e3a7ee160989b33b39477b6dd3f15cb | 7681a3eb40ab49341d4e88420dafc7f4944ac7b7 | Makes sense, yes. I was also thinking that it would be better to just use a `tmpdir` and `tmpfile` somewhere perhaps for the config file to make sure we have a "clean slate" for each run. | thaJeztah | 4,727 |
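A short sketch of that "clean slate" idea (not code from the PR): each subtest writes its `daemon.json` into a fresh per-test directory from `t.TempDir()`, which the testing package creates and removes automatically, instead of reusing `d.RootDir()` across runs.

```go
// Sketch: inside the subtest body above, use a throwaway directory for the
// config file so no state carries over between runs.
cfg := filepath.Join(t.TempDir(), "daemon.json")
err := ioutil.WriteFile(cfg, []byte(`{"seccomp-profile": "`+tc.profile+`"}`), 0644)
assert.NilError(t, err)
d.Start(t, "--config-file", cfg)
info := d.Info(t)
assert.Assert(t, is.Contains(info.SecurityOptions, "name=seccomp,profile="+tc.expectedProfile))
d.Stop(t)
```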
moby/moby | 42,473 | revendor BuildKit (master branch) | relates to https://github.com/moby/moby/issues/42471
depends on:
- [x] https://github.com/moby/buildkit/pull/2150
- [x] https://github.com/moby/buildkit/pull/2151
Updates:
- vendor: golang.org/x/crypto 0c34fe9e7dc2486962ef9867e3edb3503537209f
full diff: https://github.com/golang/crypto/compare/c1f2f97bffc9c53fc40a1a28a5b460094c0050d9...0c34fe9e7dc2486962ef9867e3edb3503537209f
- vendor: golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
full diff: https://github.com/golang/time/compare/555d28b269f0569763d25dbe1a237ae74c6bcc82...3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
- vendor: golang.org/x/net e18ecbb051101a46fc263334b127c89bc7bff7ea
full diff: https://github.com/golang/net/compare/6772e930b67bb09bf22262c7378e7d2f67cf59d1...e18ecbb051101a46fc263334b127c89bc7bff7ea
- vendor: golang.org/x/sync 036812b2e83c0ddf193dd5a34e034151da389d09
full diff: https://github.com/golang/sync/compare/6e8e738ad208923de99951fe0b48239bfd864f28...036812b2e83c0ddf193dd5a34e034151da389d09
- ~vendor: google.golang.org/genproto
full diff: https://github.com/googleapis/go-genproto/compare/3f1135a288c9a07e340ae8ba4cc6c7065a3160e8...8816d57aaa9ad8cba31b2a8ecb6199c494bdf8b4~
- vendor: github.com/tonistiigi/fsutil 5dfbf5db66b9c8fb7befcab1722381de99ec0430
full diff: https://github.com/tonistiigi/fsutil/compare/0834f99b7b85462efb69b4f571a4fa3ca7da5ac9...d72af97c0eaf93c1d20360e3cb9c63c223675b83
| null | 2021-06-05 20:00:48+00:00 | 2021-06-17 01:56:25+00:00 | builder/builder-next/worker/worker.go | package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts a LLB vertex into a LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/semaphore"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts an LLB vertex into an LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
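		// Per the review discussion on this change, leaving parallelism nil is safe:
		// no concurrency limit is applied. It could later be wired to a daemon-level
		// configuration option.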
var parallelism *semaphore.Weighted
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
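// getRef returns an immutable cache reference for the given chain of layer diff IDs,
// recursively resolving (and then releasing) the parent chain.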
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
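// discardProgress implements pkgprogress.Output and silently drops all progress updates.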
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
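// layerDescriptor implements xfer.DownloadDescriptor so that layers referenced by a
// solver.Remote can be pulled through the Moby download manager.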
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
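// getLayers pairs each remote blob descriptor with a diff descriptor, taking the layer's
// uncompressed digest from the containerd.io/uncompressed annotation.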
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
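// oneOffProgress writes a "started" progress status for id and returns a function that
// marks it completed, passing through the given error.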
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
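// emptyProvider is a placeholder content.Provider for remotes whose layer contents are
// managed by the Moby layer store; reading from it always fails.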
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| thaJeztah | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | 9e8cf1016ead200aea31d7ff381ba2ed01416ddb | @tonistiigi @AkihiroSuda I could use some help here; this relates to https://github.com/moby/buildkit/pull/2049. Is there any value I should pass here? If so; where do I get this from? | thaJeztah | 4,728 |
moby/moby | 42,473 | revendor BuildKit (master branch) | relates to https://github.com/moby/moby/issues/42471
depends on:
- [x] https://github.com/moby/buildkit/pull/2150
- [x] https://github.com/moby/buildkit/pull/2151
Updates:
- vendor: golang.org/x/crypto 0c34fe9e7dc2486962ef9867e3edb3503537209f
full diff: https://github.com/golang/crypto/compare/c1f2f97bffc9c53fc40a1a28a5b460094c0050d9...0c34fe9e7dc2486962ef9867e3edb3503537209f
- vendor: golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
full diff: https://github.com/golang/time/compare/555d28b269f0569763d25dbe1a237ae74c6bcc82...3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
- vendor: golang.org/x/net e18ecbb051101a46fc263334b127c89bc7bff7ea
full diff: https://github.com/golang/net/compare/6772e930b67bb09bf22262c7378e7d2f67cf59d1...e18ecbb051101a46fc263334b127c89bc7bff7ea
- vendor: golang.org/x/sync 036812b2e83c0ddf193dd5a34e034151da389d09
full diff: https://github.com/golang/sync/compare/6e8e738ad208923de99951fe0b48239bfd864f28...036812b2e83c0ddf193dd5a34e034151da389d09
- ~vendor: google.golang.org/genproto
full diff: https://github.com/googleapis/go-genproto/compare/3f1135a288c9a07e340ae8ba4cc6c7065a3160e8...8816d57aaa9ad8cba31b2a8ecb6199c494bdf8b4~
- vendor: github.com/tonistiigi/fsutil 5dfbf5db66b9c8fb7befcab1722381de99ec0430
full diff: https://github.com/tonistiigi/fsutil/compare/0834f99b7b85462efb69b4f571a4fa3ca7da5ac9...d72af97c0eaf93c1d20360e3cb9c63c223675b83
| null | 2021-06-05 20:00:48+00:00 | 2021-06-17 01:56:25+00:00 | builder/builder-next/worker/worker.go | package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts an LLB vertex into an LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/semaphore"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts an LLB vertex into an LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
var parallelism *semaphore.Weighted
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| thaJeztah | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | 9e8cf1016ead200aea31d7ff381ba2ed01416ddb | Should be safe to ignore and pass nil. Later you may connect this to dockerd configuration option. | tonistiigi | 4,729 |
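A minimal sketch of the follow-up suggested in the comment above — wiring a daemon-level limit into the *semaphore.Weighted that Worker.ResolveOp hands to ops.NewExecOp/NewFileOp/NewSourceOp. The maxParallelism setting is hypothetical (not an existing dockerd option); a nil semaphore keeps the current unlimited behaviour.

package worker

import "golang.org/x/sync/semaphore"

// parallelismSemaphore converts a hypothetical daemon-level setting into the
// semaphore passed to the LLB ops; returning nil applies no concurrency limit.
func parallelismSemaphore(maxParallelism int64) *semaphore.Weighted {
	if maxParallelism <= 0 {
		return nil // unlimited, as in this change
	}
	return semaphore.NewWeighted(maxParallelism)
}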
moby/moby | 42,473 | revendor BuildKit (master branch) | relates to https://github.com/moby/moby/issues/42471
depends on:
- [x] https://github.com/moby/buildkit/pull/2150
- [x] https://github.com/moby/buildkit/pull/2151
Updates:
- vendor: golang.org/x/crypto 0c34fe9e7dc2486962ef9867e3edb3503537209f
full diff: https://github.com/golang/crypto/compare/c1f2f97bffc9c53fc40a1a28a5b460094c0050d9...0c34fe9e7dc2486962ef9867e3edb3503537209f
- vendor: golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
full diff: https://github.com/golang/time/compare/555d28b269f0569763d25dbe1a237ae74c6bcc82...3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
- vendor: golang.org/x/net e18ecbb051101a46fc263334b127c89bc7bff7ea
full diff: https://github.com/golang/net/compare/6772e930b67bb09bf22262c7378e7d2f67cf59d1...e18ecbb051101a46fc263334b127c89bc7bff7ea
- vendor: golang.org/x/sync 036812b2e83c0ddf193dd5a34e034151da389d09
full diff: https://github.com/golang/sync/compare/6e8e738ad208923de99951fe0b48239bfd864f28...036812b2e83c0ddf193dd5a34e034151da389d09
- ~vendor: google.golang.org/genproto
full diff: https://github.com/googleapis/go-genproto/compare/3f1135a288c9a07e340ae8ba4cc6c7065a3160e8...8816d57aaa9ad8cba31b2a8ecb6199c494bdf8b4~
- vendor: github.com/tonistiigi/fsutil 5dfbf5db66b9c8fb7befcab1722381de99ec0430
full diff: https://github.com/tonistiigi/fsutil/compare/0834f99b7b85462efb69b4f571a4fa3ca7da5ac9...d72af97c0eaf93c1d20360e3cb9c63c223675b83
| null | 2021-06-05 20:00:48+00:00 | 2021-06-17 01:56:25+00:00 | builder/builder-next/worker/worker.go | package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts an LLB vertex into an LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| package worker
import (
"context"
"fmt"
"io"
"io/ioutil"
nethttp "net/http"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
"github.com/docker/docker/distribution"
distmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
pkgprogress "github.com/docker/docker/pkg/progress"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor"
"github.com/moby/buildkit/exporter"
localexporter "github.com/moby/buildkit/exporter/local"
tarexporter "github.com/moby/buildkit/exporter/tar"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver/mounts"
"github.com/moby/buildkit/solver/llbsolver/ops"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/git"
"github.com/moby/buildkit/source/http"
"github.com/moby/buildkit/source/local"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/semaphore"
)
const labelCreatedAt = "buildkit/createdat"
// LayerAccess provides access to a moby layer from a snapshot
type LayerAccess interface {
GetDiffIDs(ctx context.Context, key string) ([]layer.DiffID, error)
EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
}
// Opt defines a structure for creating a worker.
type Opt struct {
ID string
Labels map[string]string
GCPolicy []client.PruneInfo
MetadataStore *metadata.Store
Executor executor.Executor
Snapshotter snapshot.Snapshotter
ContentStore content.Store
CacheManager cache.Manager
ImageSource *containerimage.Source
DownloadManager distribution.RootFSDownloadManager
V2MetadataService distmetadata.V2MetadataService
Transport nethttp.RoundTripper
Exporter exporter.Exporter
Layers LayerAccess
Platforms []ocispec.Platform
}
// Worker is a local worker instance with dedicated snapshotter, cache, and so on.
// TODO: s/Worker/OpWorker/g ?
type Worker struct {
Opt
SourceManager *source.Manager
}
// NewWorker instantiates a local worker
func NewWorker(opt Opt) (*Worker, error) {
sm, err := source.NewManager()
if err != nil {
return nil, err
}
cm := opt.CacheManager
sm.Register(opt.ImageSource)
gs, err := git.NewSource(git.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(gs)
} else {
logrus.Warnf("Could not register builder git source: %s", err)
}
hs, err := http.NewSource(http.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
Transport: opt.Transport,
})
if err == nil {
sm.Register(hs)
} else {
logrus.Warnf("Could not register builder http source: %s", err)
}
ss, err := local.NewSource(local.Opt{
CacheAccessor: cm,
MetadataStore: opt.MetadataStore,
})
if err == nil {
sm.Register(ss)
} else {
logrus.Warnf("Could not register builder local source: %s", err)
}
return &Worker{
Opt: opt,
SourceManager: sm,
}, nil
}
// ID returns worker ID
func (w *Worker) ID() string {
return w.Opt.ID
}
// Labels returns map of all worker labels
func (w *Worker) Labels() map[string]string {
return w.Opt.Labels
}
// Platforms returns one or more platforms supported by the image.
func (w *Worker) Platforms(noCache bool) []ocispec.Platform {
if noCache {
pm := make(map[string]struct{}, len(w.Opt.Platforms))
for _, p := range w.Opt.Platforms {
pm[platforms.Format(p)] = struct{}{}
}
for _, p := range archutil.SupportedPlatforms(noCache) {
if _, ok := pm[p]; !ok {
pp, _ := platforms.Parse(p)
w.Opt.Platforms = append(w.Opt.Platforms, pp)
}
}
}
if len(w.Opt.Platforms) == 0 {
return []ocispec.Platform{platforms.DefaultSpec()}
}
return w.Opt.Platforms
}
// GCPolicy returns automatic GC Policy
func (w *Worker) GCPolicy() []client.PruneInfo {
return w.Opt.GCPolicy
}
// ContentStore returns content store
func (w *Worker) ContentStore() content.Store {
return w.Opt.ContentStore
}
// MetadataStore returns the metadata store
func (w *Worker) MetadataStore() *metadata.Store {
return w.Opt.MetadataStore
}
// LoadRef loads a reference by ID
func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
var opts []cache.RefOption
if hidden {
opts = append(opts, cache.NoUpdateLastUsed)
}
return w.CacheManager().Get(ctx, id, opts...)
}
// ResolveOp converts a LLB vertex into a LLB operation
func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
if baseOp, ok := v.Sys().(*pb.Op); ok {
// TODO do we need to pass a value here? Where should it come from? https://github.com/moby/buildkit/commit/b3cf7c43cfefdfd7a945002c0e76b54e346ab6cf
var parallelism *semaphore.Weighted
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
case *pb.Op_Exec:
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
case *pb.Op_Build:
return ops.NewBuildOp(v, op, s, w)
}
}
return nil, errors.Errorf("could not resolve %v", v)
}
// ResolveImageConfig returns image config for an image
func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
}
// DiskUsage returns disk usage report
func (w *Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) {
return w.CacheManager().DiskUsage(ctx, opt)
}
// Prune deletes reclaimable build cache
func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error {
return w.CacheManager().Prune(ctx, ch, info...)
}
// Exporter returns exporter by name
func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) {
switch name {
case "moby":
return w.Opt.Exporter, nil
case client.ExporterLocal:
return localexporter.New(localexporter.Opt{
SessionManager: sm,
})
case client.ExporterTar:
return tarexporter.New(tarexporter.Opt{
SessionManager: sm,
})
default:
return nil, errors.Errorf("exporter %q could not be found", name)
}
}
// GetRemote returns a remote snapshot reference for a local one
func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) {
var diffIDs []layer.DiffID
var err error
if !createIfNeeded {
diffIDs, err = w.Layers.GetDiffIDs(ctx, ref.ID())
if err != nil {
return nil, err
}
} else {
if err := ref.Finalize(ctx, true); err != nil {
return nil, err
}
diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
if err != nil {
return nil, err
}
}
descriptors := make([]ocispec.Descriptor, len(diffIDs))
for i, dgst := range diffIDs {
descriptors[i] = ocispec.Descriptor{
MediaType: images.MediaTypeDockerSchema2Layer,
Digest: digest.Digest(dgst),
Size: -1,
}
}
return &solver.Remote{
Descriptors: descriptors,
Provider: &emptyProvider{},
}, nil
}
// PruneCacheMounts removes the current cache snapshots for specified IDs
func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
mu := mounts.CacheMountsLocker()
mu.Lock()
defer mu.Unlock()
for _, id := range ids {
id = "cache-dir:" + id
sis, err := w.Opt.MetadataStore.Search(id)
if err != nil {
return err
}
for _, si := range sis {
for _, k := range si.Indexes() {
if k == id || strings.HasPrefix(k, id+":") {
if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
si = siCached
}
if err := cache.CachePolicyDefault(si); err != nil {
return err
}
si.Queue(func(b *bolt.Bucket) error {
return si.SetValue(b, k, nil)
})
if err := si.Commit(); err != nil {
return err
}
// if ref is unused try to clean it up right away by releasing it
if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
go mref.Release(context.TODO())
}
break
}
}
}
}
mounts.ClearActiveCacheMounts()
return nil
}
func (w *Worker) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
var parent cache.ImmutableRef
if len(diffIDs) > 1 {
var err error
parent, err = w.getRef(ctx, diffIDs[:len(diffIDs)-1], opts...)
if err != nil {
return nil, err
}
defer parent.Release(context.TODO())
}
return w.CacheManager().GetByBlob(context.TODO(), ocispec.Descriptor{
Annotations: map[string]string{
"containerd.io/uncompressed": diffIDs[len(diffIDs)-1].String(),
},
}, parent, opts...)
}
// FromRemote converts a remote snapshot reference to a local one
func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) {
rootfs, err := getLayers(ctx, remote.Descriptors)
if err != nil {
return nil, err
}
layers := make([]xfer.DownloadDescriptor, 0, len(rootfs))
for _, l := range rootfs {
// ongoing.add(desc)
layers = append(layers, &layerDescriptor{
desc: l.Blob,
diffID: layer.DiffID(l.Diff.Digest),
provider: remote.Provider,
w: w,
pctx: ctx,
})
}
defer func() {
for _, l := range rootfs {
w.ContentStore().Delete(context.TODO(), l.Blob.Digest)
}
}()
r := image.NewRootFS()
rootFS, release, err := w.DownloadManager.Download(ctx, *r, runtime.GOOS, layers, &discardProgress{})
if err != nil {
return nil, err
}
defer release()
if len(rootFS.DiffIDs) != len(layers) {
return nil, errors.Errorf("invalid layer count mismatch %d vs %d", len(rootFS.DiffIDs), len(layers))
}
for i := range rootFS.DiffIDs {
tm := time.Now()
if tmstr, ok := remote.Descriptors[i].Annotations[labelCreatedAt]; ok {
if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil {
return nil, err
}
}
descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest)
if v, ok := remote.Descriptors[i].Annotations["buildkit/description"]; ok {
descr = v
}
ref, err := w.getRef(ctx, rootFS.DiffIDs[:i+1], cache.WithDescription(descr), cache.WithCreationTime(tm))
if err != nil {
return nil, err
}
if i == len(remote.Descriptors)-1 {
return ref, nil
}
defer ref.Release(context.TODO())
}
return nil, errors.Errorf("unreachable")
}
// Executor returns executor.Executor for running processes
func (w *Worker) Executor() executor.Executor {
return w.Opt.Executor
}
// CacheManager returns cache.Manager for accessing local storage
func (w *Worker) CacheManager() cache.Manager {
return w.Opt.CacheManager
}
type discardProgress struct{}
func (*discardProgress) WriteProgress(_ pkgprogress.Progress) error {
return nil
}
// Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
type layerDescriptor struct {
provider content.Provider
desc ocispec.Descriptor
diffID layer.DiffID
// ref ctdreference.Spec
w *Worker
pctx context.Context
}
func (ld *layerDescriptor) Key() string {
return "v2:" + ld.desc.Digest.String()
}
func (ld *layerDescriptor) ID() string {
return ld.desc.Digest.String()
}
func (ld *layerDescriptor) DiffID() (layer.DiffID, error) {
return ld.diffID, nil
}
func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogress.Output) (io.ReadCloser, int64, error) {
done := oneOffProgress(ld.pctx, fmt.Sprintf("pulling %s", ld.desc.Digest))
// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
discardLogs := func(_ []byte) {}
if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
return nil, 0, done(err)
}
_ = done(nil)
ra, err := ld.w.ContentStore().ReaderAt(ctx, ld.desc)
if err != nil {
return nil, 0, err
}
return ioutil.NopCloser(content.NewReader(ra)), ld.desc.Size, nil
}
func (ld *layerDescriptor) Close() {
// ld.is.ContentStore().Delete(context.TODO(), ld.desc.Digest)
}
func (ld *layerDescriptor) Registered(diffID layer.DiffID) {
// Cache mapping from this layer's DiffID to the blobsum
ld.w.V2MetadataService.Add(diffID, distmetadata.V2Metadata{Digest: ld.desc.Digest})
}
func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer, error) {
layers := make([]rootfs.Layer, len(descs))
for i, desc := range descs {
diffIDStr := desc.Annotations["containerd.io/uncompressed"]
if diffIDStr == "" {
return nil, errors.Errorf("%s missing uncompressed digest", desc.Digest)
}
diffID, err := digest.Parse(diffIDStr)
if err != nil {
return nil, err
}
layers[i].Diff = ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffID,
}
layers[i].Blob = ocispec.Descriptor{
MediaType: desc.MediaType,
Digest: desc.Digest,
Size: desc.Size,
}
}
return layers, nil
}
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
_ = pw.Write(id, st)
return func(err error) error {
// TODO: set error on status
now := time.Now()
st.Completed = &now
_ = pw.Write(id, st)
_ = pw.Close()
return err
}
}
type emptyProvider struct {
}
func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
return nil, errors.Errorf("ReaderAt not implemented for empty provider")
}
| thaJeztah | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | 9e8cf1016ead200aea31d7ff381ba2ed01416ddb | You can just pass nil though, don't need a `var`. | tonistiigi | 4,730 |
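For context, the review comment above refers to the `parallelism` argument in `Worker.ResolveOp`, where a nil-valued `var parallelism *semaphore.Weighted` is declared only to be passed through. A minimal sketch of the suggested simplification, passing `nil` directly, could look like this (assuming the `ops` constructors accept a nil `*semaphore.Weighted`, which is effectively what the current code already passes):

```go
// Sketch of the reviewer's suggestion (not the merged code): drop the
// nil-valued variable and pass nil explicitly where no limit is configured.
switch op := baseOp.Op.(type) {
case *pb.Op_Source:
	return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, nil, sm, w)
case *pb.Op_Exec:
	return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), nil, sm, w.Opt.MetadataStore, w.Executor(), w)
case *pb.Op_File:
	return ops.NewFileOp(v, op, w.CacheManager(), nil, w.Opt.MetadataStore, w)
case *pb.Op_Build:
	return ops.NewBuildOp(v, op, s, w)
}
```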
moby/moby | 42,473 | revendor BuildKit (master branch) | relates to https://github.com/moby/moby/issues/42471
depends on:
- [x] https://github.com/moby/buildkit/pull/2150
- [x] https://github.com/moby/buildkit/pull/2151
Updates:
- vendor: golang.org/x/crypto 0c34fe9e7dc2486962ef9867e3edb3503537209f
full diff: https://github.com/golang/crypto/compare/c1f2f97bffc9c53fc40a1a28a5b460094c0050d9...0c34fe9e7dc2486962ef9867e3edb3503537209f
- vendor: golang.org/x/time 3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
full diff: https://github.com/golang/time/compare/555d28b269f0569763d25dbe1a237ae74c6bcc82...3af7569d3a1e776fc2a3c1cec133b43105ea9c2e
- vendor: golang.org/x/net e18ecbb051101a46fc263334b127c89bc7bff7ea
full diff: https://github.com/golang/net/compare/6772e930b67bb09bf22262c7378e7d2f67cf59d1...e18ecbb051101a46fc263334b127c89bc7bff7ea
- vendor: golang.org/x/sync 036812b2e83c0ddf193dd5a34e034151da389d09
full diff: https://github.com/golang/sync/compare/6e8e738ad208923de99951fe0b48239bfd864f28...036812b2e83c0ddf193dd5a34e034151da389d09
- ~vendor: google.golang.org/genproto
full diff: https://github.com/googleapis/go-genproto/compare/3f1135a288c9a07e340ae8ba4cc6c7065a3160e8...8816d57aaa9ad8cba31b2a8ecb6199c494bdf8b4~
- vendor: github.com/tonistiigi/fsutil 5dfbf5db66b9c8fb7befcab1722381de99ec0430
full diff: https://github.com/tonistiigi/fsutil/compare/0834f99b7b85462efb69b4f571a4fa3ca7da5ac9...d72af97c0eaf93c1d20360e3cb9c63c223675b83
| null | 2021-06-05 20:00:48+00:00 | 2021-06-17 01:56:25+00:00 | builder/dockerfile/copy.go | package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"archive/tar"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/urlutil"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const unnamedFilename = "__unnamed__"
type pathCache interface {
Load(key interface{}) (value interface{}, ok bool)
Store(key, value interface{})
}
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
root containerfs.ContainerFS
path string
hash string
noDecompress bool
}
func (c copyInfo) fullPath() (string, error) {
return c.root.ResolveScopedPath(c.path, true)
}
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
return copyInfo{root: source.Root(), path: path, hash: hash}
}
func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
return copyInfos
}
// copyInstruction is a fully parsed COPY or ADD command that is passed to
// Builder.performCopy to copy files into the image filesystem
type copyInstruction struct {
cmdName string
infos []copyInfo
dest string
chownStr string
allowLocalDecompression bool
preserveOwnership bool
}
// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
// and creates a copyInstruction
type copier struct {
imageSource *imageMount
source builder.Source
pathCache pathCache
download sourceDownloader
platform *specs.Platform
// for cleanup. TODO: having copier.cleanup() is error prone and hard to
// follow. Code calling performCopy should manage the lifecycle of its params.
// Copier should take override source as input, not imageMount.
activeLayer builder.RWLayer
tmpPaths []string
}
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
platform := req.builder.platform
if platform == nil {
// May be nil if not explicitly set in API/dockerfile
platform = &specs.Platform{}
}
if platform.OS == "" {
// Default to the dispatch requests operating system if not explicit in API/dockerfile
platform.OS = req.state.operatingSystem
}
if platform.OS == "" {
// This is a failsafe just in case. Shouldn't be hit.
platform.OS = runtime.GOOS
}
return copier{
source: req.source,
pathCache: req.builder.pathCache,
download: download,
imageSource: imageSource,
platform: platform,
}
}
func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) {
inst := copyInstruction{cmdName: cmdName}
last := len(args) - 1
// Work in platform-specific filepath semantics
// TODO: This OS switch for paths is NOT correct and should not be supported.
// Maintained for backwards compatibility
pathOS := runtime.GOOS
if o.platform != nil {
pathOS = o.platform.OS
}
inst.dest = fromSlash(args[last], pathOS)
separator := string(separator(pathOS))
infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
if err != nil {
return inst, errors.Wrapf(err, "%s failed", cmdName)
}
if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
}
inst.infos = infos
return inst, nil
}
// getCopyInfosForSourcePaths iterates over the source files and calculates the info
// needed to copy (e.g. hash value if cached)
// The dest is used in case source is URL (and ends with "/")
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
var infos []copyInfo
for _, orig := range sources {
subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
if err != nil {
return nil, err
}
infos = append(infos, subinfos...)
}
if len(infos) == 0 {
return nil, errors.New("no source files were specified")
}
return infos, nil
}
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
if !urlutil.IsURL(orig) {
return o.calcCopyInfo(orig, true)
}
remote, path, err := o.download(orig)
if err != nil {
return nil, err
}
// If path == "" then we are unable to determine filename from src
// We have to make sure dest is available
if path == "" {
if strings.HasSuffix(dest, "/") {
return nil, errors.Errorf("cannot determine filename for source %s", orig)
}
path = unnamedFilename
}
o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
hash, err := remote.Hash(path)
ci := newCopyInfoFromSource(remote, path, hash)
ci.noDecompress = true // data from http shouldn't be extracted even on ADD
return newCopyInfos(ci), err
}
// Cleanup removes any temporary directories created as part of downloading
// remote files.
func (o *copier) Cleanup() {
for _, path := range o.tmpPaths {
os.RemoveAll(path)
}
o.tmpPaths = []string{}
if o.activeLayer != nil {
o.activeLayer.Release()
o.activeLayer = nil
}
}
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
imageSource := o.imageSource
// TODO: do this when creating copier. Requires validateCopySourcePath
// (and others below) to be aware of the different sources. Why is it only
// done on image Source?
if imageSource != nil && o.activeLayer == nil {
// this needs to be protected against repeated calls as wildcard copy
// will call it multiple times for a single COPY
var err error
rwLayer, err := imageSource.NewRWLayer()
if err != nil {
return nil, err
}
o.activeLayer = rwLayer
o.source, err = remotecontext.NewLazySource(rwLayer.Root())
if err != nil {
return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path())
}
}
if o.source == nil {
return nil, errors.Errorf("missing build context")
}
root := o.source.Root()
if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
return nil, err
}
// Work in source OS specific filepath semantics
// For LCOW, this is NOT the daemon OS.
origPath = root.FromSlash(origPath)
origPath = strings.TrimPrefix(origPath, string(root.Separator()))
origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))
// Deal with wildcards
if allowWildcards && containsWildcards(origPath, root.OS()) {
return o.copyWithWildcards(origPath)
}
if imageSource != nil && imageSource.ImageID() != "" {
// return a cached copy if one exists
if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
}
}
// Deal with the single file case
copyInfo, err := copyInfoForFile(o.source, origPath)
switch {
case imageSource == nil && errors.Is(err, os.ErrNotExist):
return nil, errors.Wrapf(err, "file not found in build context or excluded by .dockerignore")
case err != nil:
return nil, err
case copyInfo.hash != "":
o.storeInPathCache(imageSource, origPath, copyInfo.hash)
return newCopyInfos(copyInfo), err
}
// TODO: remove, handle dirs in Hash()
subfiles, err := walkSource(o.source, origPath)
if err != nil {
return nil, err
}
hash := hashStringSlice("dir", subfiles)
o.storeInPathCache(imageSource, origPath, hash)
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}
func containsWildcards(name, platform string) bool {
isWindows := platform == "windows"
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' && !isWindows {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
if im != nil {
o.pathCache.Store(im.ImageID()+path, hash)
}
}
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
root := o.source.Root()
var copyInfos []copyInfo
if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(root, path)
if err != nil {
return err
}
if rel == "." {
return nil
}
if match, _ := root.Match(origPath, rel); !match {
return nil
}
// Note we set allowWildcards to false in case the name has
// a * in it
subInfos, err := o.calcCopyInfo(rel, false)
if err != nil {
return err
}
copyInfos = append(copyInfos, subInfos...)
return nil
}); err != nil {
return nil, err
}
return copyInfos, nil
}
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
fi, err := remotecontext.StatAt(source, path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// return the relative path in the error, which is more user-friendly than the full path to the tmp-dir
return copyInfo{}, errors.WithStack(&os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist})
}
return copyInfo{}, err
}
if fi.IsDir() {
return copyInfo{}, nil
}
hash, err := source.Hash(path)
if err != nil {
return copyInfo{}, err
}
return newCopyInfoFromSource(source, path, "file:"+hash), nil
}
// TODO: dedupe with copyWithWildcards()
func walkSource(source builder.Source, origPath string) ([]string, error) {
fp, err := remotecontext.FullPath(source, origPath)
if err != nil {
return nil, err
}
// Must be a dir
var subfiles []string
err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(source.Root(), path)
if err != nil {
return err
}
if rel == "." {
return nil
}
hash, err := source.Hash(rel)
if err != nil {
return nil
}
// we already checked handleHash above
subfiles = append(subfiles, hash)
return nil
})
if err != nil {
return nil, err
}
sort.Strings(subfiles)
return subfiles, nil
}
type sourceDownloader func(string) (builder.Source, string, error)
func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
return func(url string) (builder.Source, string, error) {
return downloadSource(output, stdout, url)
}
}
func errOnSourceDownload(_ string) (builder.Source, string, error) {
return nil, "", errors.New("source can't be a URL for COPY")
}
func getFilenameForDownload(path string, resp *http.Response) string {
// Guess filename based on source
if path != "" && !strings.HasSuffix(path, "/") {
if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
return filename
}
}
// Guess filename based on Content-Disposition
if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
return filename
}
}
}
}
return ""
}
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
u, err := url.Parse(srcURL)
if err != nil {
return
}
resp, err := remotecontext.GetWithStatusError(srcURL)
if err != nil {
return
}
filename := getFilenameForDownload(u.Path, resp)
// Prepare file in a tmp dir
tmpDir, err := ioutils.TempDir("", "docker-remote")
if err != nil {
return
}
defer func() {
if err != nil {
os.RemoveAll(tmpDir)
}
}()
// If filename is empty, the returned filename will be "" but
// the tmp filename will be created as "__unnamed__"
tmpFileName := filename
if filename == "" {
tmpFileName = unnamedFilename
}
tmpFileName = filepath.Join(tmpDir, tmpFileName)
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return
}
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
// Download and dump result to tmp file
// TODO: add filehash directly
if _, err = io.Copy(tmpFile, progressReader); err != nil {
tmpFile.Close()
return
}
// TODO: how important is this random blank line to the output?
fmt.Fprintln(stdout)
// Set the mtime to the Last-Modified header value if present
// Otherwise just remove atime and mtime
mTime := time.Time{}
lastMod := resp.Header.Get("Last-Modified")
if lastMod != "" {
// If we can't parse it then just let it default to 'zero'
// otherwise use the parsed time value
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
mTime = parsedMTime
}
}
tmpFile.Close()
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
return
}
lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
return lc, filename, err
}
type copyFileOptions struct {
decompress bool
identity *idtools.Identity
archiver Archiver
}
type copyEndpoint struct {
driver containerfs.Driver
path string
}
func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
srcPath, err := source.fullPath()
if err != nil {
return err
}
destPath, err := dest.fullPath()
if err != nil {
return err
}
archiver := options.archiver
srcEndpoint := ©Endpoint{driver: source.root, path: srcPath}
destEndpoint := ©Endpoint{driver: dest.root, path: destPath}
src, err := source.root.Stat(srcPath)
if err != nil {
return errors.Wrapf(err, "source path not found")
}
if src.IsDir() {
return copyDirectory(archiver, srcEndpoint, destEndpoint, options.identity)
}
if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
return archiver.UntarPath(srcPath, destPath)
}
destExistsAsDir, err := isExistingDirectory(destEndpoint)
if err != nil {
return err
}
// dest.path must be used because destPath has already been cleaned of any
// trailing slash
if endsInSlash(dest.root, dest.path) || destExistsAsDir {
// source.path must be used to get the correct filename when the source
// is a symlink
destPath = dest.root.Join(destPath, source.root.Base(source.path))
destEndpoint = ©Endpoint{driver: dest.root, path: destPath}
}
return copyFile(archiver, srcEndpoint, destEndpoint, options.identity)
}
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
file, err := driver.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := archive.DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
}
if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy directory")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, !destExists)
}
return nil
}
func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
// LCOW
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
} else {
// Normal containers
if identity == nil {
// Use system.MkdirAll here, which is a custom version of os.MkdirAll
// modified for use on Windows to handle volume GUID paths. These paths
// are of the form \\?\Volume{<GUID>}\<path>. An example would be:
// \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe
if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
return err
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
}
}
if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy file")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, false)
}
return nil
}
func endsInSlash(driver containerfs.Driver, path string) bool {
return strings.HasSuffix(path, string(driver.Separator()))
}
// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(point *copyEndpoint) (bool, error) {
destStat, err := point.driver.Stat(point.path)
switch {
case errors.Is(err, os.ErrNotExist):
return false, nil
case err != nil:
return false, err
}
return destStat.IsDir(), nil
}
| package dockerfile // import "github.com/docker/docker/builder/dockerfile"
import (
"archive/tar"
"fmt"
"io"
"mime"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/urlutil"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const unnamedFilename = "__unnamed__"
type pathCache interface {
Load(key interface{}) (value interface{}, ok bool)
Store(key, value interface{})
}
// copyInfo is a data object which stores the metadata about each source file in
// a copyInstruction
type copyInfo struct {
root containerfs.ContainerFS
path string
hash string
noDecompress bool
}
func (c copyInfo) fullPath() (string, error) {
return c.root.ResolveScopedPath(c.path, true)
}
func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
return copyInfo{root: source.Root(), path: path, hash: hash}
}
func newCopyInfos(copyInfos ...copyInfo) []copyInfo {
return copyInfos
}
// copyInstruction is a fully parsed COPY or ADD command that is passed to
// Builder.performCopy to copy files into the image filesystem
type copyInstruction struct {
cmdName string
infos []copyInfo
dest string
chownStr string
allowLocalDecompression bool
preserveOwnership bool
}
// copier reads a raw COPY or ADD command, fetches remote sources using a downloader,
// and creates a copyInstruction
type copier struct {
imageSource *imageMount
source builder.Source
pathCache pathCache
download sourceDownloader
platform *specs.Platform
// for cleanup. TODO: having copier.cleanup() is error prone and hard to
// follow. Code calling performCopy should manage the lifecycle of its params.
// Copier should take override source as input, not imageMount.
activeLayer builder.RWLayer
tmpPaths []string
}
func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
platform := req.builder.platform
if platform == nil {
// May be nil if not explicitly set in API/dockerfile
platform = &specs.Platform{}
}
if platform.OS == "" {
// Default to the dispatch requests operating system if not explicit in API/dockerfile
platform.OS = req.state.operatingSystem
}
if platform.OS == "" {
// This is a failsafe just in case. Shouldn't be hit.
platform.OS = runtime.GOOS
}
return copier{
source: req.source,
pathCache: req.builder.pathCache,
download: download,
imageSource: imageSource,
platform: platform,
}
}
func (o *copier) createCopyInstruction(sourcesAndDest instructions.SourcesAndDest, cmdName string) (copyInstruction, error) {
inst := copyInstruction{cmdName: cmdName}
// Work in platform-specific filepath semantics
// TODO: This OS switch for paths is NOT correct and should not be supported.
// Maintained for backwards compatibility
pathOS := runtime.GOOS
if o.platform != nil {
pathOS = o.platform.OS
}
inst.dest = fromSlash(sourcesAndDest.DestPath, pathOS)
separator := string(separator(pathOS))
infos, err := o.getCopyInfosForSourcePaths(sourcesAndDest.SourcePaths, inst.dest)
if err != nil {
return inst, errors.Wrapf(err, "%s failed", cmdName)
}
if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
}
inst.infos = infos
return inst, nil
}
// getCopyInfosForSourcePaths iterates over the source files and calculates the info
// needed to copy (e.g. hash value if cached)
// The dest is used in case source is URL (and ends with "/")
func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
var infos []copyInfo
for _, orig := range sources {
subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
if err != nil {
return nil, err
}
infos = append(infos, subinfos...)
}
if len(infos) == 0 {
return nil, errors.New("no source files were specified")
}
return infos, nil
}
func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
if !urlutil.IsURL(orig) {
return o.calcCopyInfo(orig, true)
}
remote, path, err := o.download(orig)
if err != nil {
return nil, err
}
// If path == "" then we are unable to determine filename from src
// We have to make sure dest is available
if path == "" {
if strings.HasSuffix(dest, "/") {
return nil, errors.Errorf("cannot determine filename for source %s", orig)
}
path = unnamedFilename
}
o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
hash, err := remote.Hash(path)
ci := newCopyInfoFromSource(remote, path, hash)
ci.noDecompress = true // data from http shouldn't be extracted even on ADD
return newCopyInfos(ci), err
}
// Cleanup removes any temporary directories created as part of downloading
// remote files.
func (o *copier) Cleanup() {
for _, path := range o.tmpPaths {
os.RemoveAll(path)
}
o.tmpPaths = []string{}
if o.activeLayer != nil {
o.activeLayer.Release()
o.activeLayer = nil
}
}
// TODO: allowWildcards can probably be removed by refactoring this function further.
func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
imageSource := o.imageSource
// TODO: do this when creating copier. Requires validateCopySourcePath
// (and others below) to be aware of the different sources. Why is it only
// done on image Source?
if imageSource != nil && o.activeLayer == nil {
// this needs to be protected against repeated calls as wildcard copy
// will call it multiple times for a single COPY
var err error
rwLayer, err := imageSource.NewRWLayer()
if err != nil {
return nil, err
}
o.activeLayer = rwLayer
o.source, err = remotecontext.NewLazySource(rwLayer.Root())
if err != nil {
return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path())
}
}
if o.source == nil {
return nil, errors.Errorf("missing build context")
}
root := o.source.Root()
if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
return nil, err
}
// Work in source OS specific filepath semantics
// For LCOW, this is NOT the daemon OS.
origPath = root.FromSlash(origPath)
origPath = strings.TrimPrefix(origPath, string(root.Separator()))
origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))
// Deal with wildcards
if allowWildcards && containsWildcards(origPath, root.OS()) {
return o.copyWithWildcards(origPath)
}
if imageSource != nil && imageSource.ImageID() != "" {
// return a cached copy if one exists
if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok {
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil
}
}
// Deal with the single file case
copyInfo, err := copyInfoForFile(o.source, origPath)
switch {
case imageSource == nil && errors.Is(err, os.ErrNotExist):
return nil, errors.Wrapf(err, "file not found in build context or excluded by .dockerignore")
case err != nil:
return nil, err
case copyInfo.hash != "":
o.storeInPathCache(imageSource, origPath, copyInfo.hash)
return newCopyInfos(copyInfo), err
}
// TODO: remove, handle dirs in Hash()
subfiles, err := walkSource(o.source, origPath)
if err != nil {
return nil, err
}
hash := hashStringSlice("dir", subfiles)
o.storeInPathCache(imageSource, origPath, hash)
return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
}
func containsWildcards(name, platform string) bool {
isWindows := platform == "windows"
for i := 0; i < len(name); i++ {
ch := name[i]
if ch == '\\' && !isWindows {
i++
} else if ch == '*' || ch == '?' || ch == '[' {
return true
}
}
return false
}
func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
if im != nil {
o.pathCache.Store(im.ImageID()+path, hash)
}
}
func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
root := o.source.Root()
var copyInfos []copyInfo
if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(root, path)
if err != nil {
return err
}
if rel == "." {
return nil
}
if match, _ := root.Match(origPath, rel); !match {
return nil
}
// Note we set allowWildcards to false in case the name has
// a * in it
subInfos, err := o.calcCopyInfo(rel, false)
if err != nil {
return err
}
copyInfos = append(copyInfos, subInfos...)
return nil
}); err != nil {
return nil, err
}
return copyInfos, nil
}
func copyInfoForFile(source builder.Source, path string) (copyInfo, error) {
fi, err := remotecontext.StatAt(source, path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
// return the relative path in the error, which is more user-friendly than the full path to the tmp-dir
return copyInfo{}, errors.WithStack(&os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist})
}
return copyInfo{}, err
}
if fi.IsDir() {
return copyInfo{}, nil
}
hash, err := source.Hash(path)
if err != nil {
return copyInfo{}, err
}
return newCopyInfoFromSource(source, path, "file:"+hash), nil
}
// TODO: dedupe with copyWithWildcards()
func walkSource(source builder.Source, origPath string) ([]string, error) {
fp, err := remotecontext.FullPath(source, origPath)
if err != nil {
return nil, err
}
// Must be a dir
var subfiles []string
err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, err := remotecontext.Rel(source.Root(), path)
if err != nil {
return err
}
if rel == "." {
return nil
}
hash, err := source.Hash(rel)
if err != nil {
return nil
}
// we already checked handleHash above
subfiles = append(subfiles, hash)
return nil
})
if err != nil {
return nil, err
}
sort.Strings(subfiles)
return subfiles, nil
}
type sourceDownloader func(string) (builder.Source, string, error)
func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader {
return func(url string) (builder.Source, string, error) {
return downloadSource(output, stdout, url)
}
}
func errOnSourceDownload(_ string) (builder.Source, string, error) {
return nil, "", errors.New("source can't be a URL for COPY")
}
func getFilenameForDownload(path string, resp *http.Response) string {
// Guess filename based on source
if path != "" && !strings.HasSuffix(path, "/") {
if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
return filename
}
}
// Guess filename based on Content-Disposition
if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
return filename
}
}
}
}
return ""
}
func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
u, err := url.Parse(srcURL)
if err != nil {
return
}
resp, err := remotecontext.GetWithStatusError(srcURL)
if err != nil {
return
}
filename := getFilenameForDownload(u.Path, resp)
// Prepare file in a tmp dir
tmpDir, err := ioutils.TempDir("", "docker-remote")
if err != nil {
return
}
defer func() {
if err != nil {
os.RemoveAll(tmpDir)
}
}()
// If filename is empty, the returned filename will be "" but
// the tmp filename will be created as "__unnamed__"
tmpFileName := filename
if filename == "" {
tmpFileName = unnamedFilename
}
tmpFileName = filepath.Join(tmpDir, tmpFileName)
tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return
}
progressOutput := streamformatter.NewJSONProgressOutput(output, true)
progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
// Download and dump result to tmp file
// TODO: add filehash directly
if _, err = io.Copy(tmpFile, progressReader); err != nil {
tmpFile.Close()
return
}
// TODO: how important is this random blank line to the output?
fmt.Fprintln(stdout)
// Set the mtime to the Last-Modified header value if present
// Otherwise just remove atime and mtime
mTime := time.Time{}
lastMod := resp.Header.Get("Last-Modified")
if lastMod != "" {
// If we can't parse it then just let it default to 'zero'
// otherwise use the parsed time value
if parsedMTime, err := http.ParseTime(lastMod); err == nil {
mTime = parsedMTime
}
}
tmpFile.Close()
if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
return
}
lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
return lc, filename, err
}
type copyFileOptions struct {
decompress bool
identity *idtools.Identity
archiver Archiver
}
type copyEndpoint struct {
driver containerfs.Driver
path string
}
func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
srcPath, err := source.fullPath()
if err != nil {
return err
}
destPath, err := dest.fullPath()
if err != nil {
return err
}
archiver := options.archiver
srcEndpoint := ©Endpoint{driver: source.root, path: srcPath}
destEndpoint := ©Endpoint{driver: dest.root, path: destPath}
src, err := source.root.Stat(srcPath)
if err != nil {
return errors.Wrapf(err, "source path not found")
}
if src.IsDir() {
return copyDirectory(archiver, srcEndpoint, destEndpoint, options.identity)
}
if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
return archiver.UntarPath(srcPath, destPath)
}
destExistsAsDir, err := isExistingDirectory(destEndpoint)
if err != nil {
return err
}
// dest.path must be used because destPath has already been cleaned of any
// trailing slash
if endsInSlash(dest.root, dest.path) || destExistsAsDir {
// source.path must be used to get the correct filename when the source
// is a symlink
destPath = dest.root.Join(destPath, source.root.Base(source.path))
destEndpoint = ©Endpoint{driver: dest.root, path: destPath}
}
return copyFile(archiver, srcEndpoint, destEndpoint, options.identity)
}
func isArchivePath(driver containerfs.ContainerFS, path string) bool {
file, err := driver.Open(path)
if err != nil {
return false
}
defer file.Close()
rdr, err := archive.DecompressStream(file)
if err != nil {
return false
}
r := tar.NewReader(rdr)
_, err = r.Next()
return err == nil
}
func copyDirectory(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
destExists, err := isExistingDirectory(dest)
if err != nil {
return errors.Wrapf(err, "failed to query destination path")
}
if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy directory")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, !destExists)
}
return nil
}
func copyFile(archiver Archiver, source, dest *copyEndpoint, identity *idtools.Identity) error {
if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
// LCOW
if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
} else {
// Normal containers
if identity == nil {
// Use system.MkdirAll here, which is a custom version of os.MkdirAll
// modified for use on Windows to handle volume GUID paths. These paths
// are of the form \\?\Volume{<GUID>}\<path>. An example would be:
// \\?\Volume{dae8d3ac-b9a1-11e9-88eb-e8554b2ba1db}\bin\busybox.exe
if err := system.MkdirAll(filepath.Dir(dest.path), 0755); err != nil {
return err
}
} else {
if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, *identity); err != nil {
return errors.Wrapf(err, "failed to create new directory")
}
}
}
if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
return errors.Wrapf(err, "failed to copy file")
}
if identity != nil {
// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
return fixPermissions(source.path, dest.path, *identity, false)
}
return nil
}
func endsInSlash(driver containerfs.Driver, path string) bool {
return strings.HasSuffix(path, string(driver.Separator()))
}
// isExistingDirectory returns true if the path exists and is a directory
func isExistingDirectory(point *copyEndpoint) (bool, error) {
destStat, err := point.driver.Stat(point.path)
switch {
case errors.Is(err, os.ErrNotExist):
return false, nil
case err != nil:
return false, err
}
return destStat.IsDir(), nil
}
| thaJeztah | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | 9e8cf1016ead200aea31d7ff381ba2ed01416ddb | 🤦 Ahhhhh.... not sure what I did here; looks like I was planning to construct the `copyInstruction` at the end, instead of constructing a partial one (in the `err` case), but forgot to either complete that, or to roll it back. | thaJeztah | 4,731
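The comment above refers to `createCopyInstruction` in the updated copy.go, which returns a partially populated `inst` in its error paths. A hedged sketch of the alternative the comment describes, assembling the `copyInstruction` only once every field is known, could look like this (it reuses the helpers shown above and is not the merged code):

```go
// Hypothetical variant: build the copyInstruction at the end and return a
// zero value on error instead of a partially populated instruction.
func (o *copier) createCopyInstruction(sourcesAndDest instructions.SourcesAndDest, cmdName string) (copyInstruction, error) {
	// Work in platform-specific filepath semantics, as the current code does.
	pathOS := runtime.GOOS
	if o.platform != nil {
		pathOS = o.platform.OS
	}
	dest := fromSlash(sourcesAndDest.DestPath, pathOS)
	infos, err := o.getCopyInfosForSourcePaths(sourcesAndDest.SourcePaths, dest)
	if err != nil {
		return copyInstruction{}, errors.Wrapf(err, "%s failed", cmdName)
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(separator(pathOS))) {
		return copyInstruction{}, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}
	return copyInstruction{cmdName: cmdName, dest: dest, infos: infos}, nil
}
```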
moby/moby | 42,472 | daemon: improve handling of ROOTLESSKIT_PARENT_EUID | - daemon.WithRootless(): make sure ROOTLESSKIT_PARENT_EUID is a valid int (see the sketch below)
- daemon.RawSysInfo(): minor simplification, and rename variable that
clashed with imported package.
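A hypothetical sketch of the check the first bullet describes (the helper name is invented, the actual change lives in daemon.WithRootless(), which is not part of this excerpt, and `os`, `strconv`, and `github.com/pkg/errors` are already imported by daemon_unix.go below):

```go
// validateRootlessKitParentEUID is a hypothetical helper: it only checks that
// ROOTLESSKIT_PARENT_EUID, when set, parses as an integer.
func validateRootlessKitParentEUID() error {
	v := os.Getenv("ROOTLESSKIT_PARENT_EUID")
	if v == "" {
		return nil // nothing to validate when the variable is not set
	}
	if _, err := strconv.Atoi(v); err != nil {
		return errors.Wrap(err, "invalid ROOTLESSKIT_PARENT_EUID: must be a numeric value")
	}
	return nil
}
```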
| null | 2021-06-05 19:14:04+00:00 | 2021-06-11 20:03:32+00:00 | daemon/daemon_unix.go | // +build linux freebsd
package daemon // import "github.com/docker/docker/daemon"
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/cgroups"
statsV1 "github.com/containerd/cgroups/stats/v1"
statsV2 "github.com/containerd/cgroups/v2/stats"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/blkiodev"
pblkiodev "github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
nwconfig "github.com/docker/docker/libnetwork/config"
"github.com/docker/docker/libnetwork/drivers/bridge"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/netutils"
"github.com/docker/docker/libnetwork/options"
lntypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/runconfig"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/moby/sys/mount"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
isWindows = false
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not kernel limit, we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container
linuxMinMemory = 6291456
// constants for remapped root settings
defaultIDSpecifier = "default"
defaultRemappedID = "dockremap"
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
cgroupNoneDriver = "none"
)
type containerGetter interface {
GetContainer(string) (*container.Container, error)
}
func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
memory := specs.LinuxMemory{}
if config.Memory > 0 {
memory.Limit = &config.Memory
}
if config.MemoryReservation > 0 {
memory.Reservation = &config.MemoryReservation
}
if config.MemorySwap > 0 {
memory.Swap = &config.MemorySwap
}
if config.MemorySwappiness != nil {
swappiness := uint64(*config.MemorySwappiness)
memory.Swappiness = &swappiness
}
if config.OomKillDisable != nil {
memory.DisableOOMKiller = config.OomKillDisable
}
if config.KernelMemory != 0 {
memory.Kernel = &config.KernelMemory
}
if config.KernelMemoryTCP != 0 {
memory.KernelTCP = &config.KernelMemoryTCP
}
return &memory
}
func getPidsLimit(config containertypes.Resources) *specs.LinuxPids {
if config.PidsLimit == nil {
return nil
}
if *config.PidsLimit <= 0 {
// docker API allows 0 and negative values to unset this to be consistent
// with default values. When updating values, runc requires -1 to unset
// the previous limit.
return &specs.LinuxPids{Limit: -1}
}
return &specs.LinuxPids{Limit: *config.PidsLimit}
}
func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
cpu := specs.LinuxCPU{}
if config.CPUShares < 0 {
return nil, fmt.Errorf("shares: invalid argument")
}
if config.CPUShares >= 0 {
shares := uint64(config.CPUShares)
cpu.Shares = &shares
}
if config.CpusetCpus != "" {
cpu.Cpus = config.CpusetCpus
}
if config.CpusetMems != "" {
cpu.Mems = config.CpusetMems
}
if config.NanoCPUs > 0 {
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
period := uint64(100 * time.Millisecond / time.Microsecond)
quota := config.NanoCPUs * int64(period) / 1e9
cpu.Period = &period
cpu.Quota = "a
}
if config.CPUPeriod != 0 {
period := uint64(config.CPUPeriod)
cpu.Period = &period
}
if config.CPUQuota != 0 {
q := config.CPUQuota
cpu.Quota = &q
}
if config.CPURealtimePeriod != 0 {
period := uint64(config.CPURealtimePeriod)
cpu.RealtimePeriod = &period
}
if config.CPURealtimeRuntime != 0 {
c := config.CPURealtimeRuntime
cpu.RealtimeRuntime = &c
}
return &cpu, nil
}
func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
var stat unix.Stat_t
var blkioWeightDevices []specs.LinuxWeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := unix.Stat(weightDevice.Path, &stat); err != nil {
return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err})
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
// The type is 32bit on mips.
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
blkioWeightDevices = append(blkioWeightDevices, d)
}
return blkioWeightDevices, nil
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
return parseSecurityOpt(container, hostConfig)
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
if opt == "no-new-privileges" {
container.NoNewPrivileges = true
continue
}
if opt == "disable" {
labelOpts = append(labelOpts, "disable")
continue
}
var con []string
if strings.Contains(opt, "=") {
con = strings.SplitN(opt, "=", 2)
} else if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
}
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
case "no-new-privileges":
noNewPrivileges, err := strconv.ParseBool(con[1])
if err != nil {
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
container.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
var throttleDevices []specs.LinuxThrottleDevice
var stat unix.Stat_t
for _, d := range devs {
if err := unix.Stat(d.Path, &stat); err != nil {
return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, Err: err})
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
// the type is 32bit on mips
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
throttleDevices = append(throttleDevices, d)
}
return throttleDevices, nil
}
// adjustParallelLimit takes a number of objects and a proposed limit and
// figures out if it's reasonable (and adjusts it accordingly). This is only
// used for daemon startup, which does a lot of parallel loading of containers
// (and if we exceed RLIMIT_NOFILE then we're in trouble).
func adjustParallelLimit(n int, limit int) int {
// Rule-of-thumb overhead factor (how many files will each goroutine open
// simultaneously). Yes, this is ugly but to be frank this whole thing is
// ugly.
const overhead = 2
// On Linux, we need to ensure that parallelStartupJobs doesn't cause us to
// exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it
// and give a warning (since in theory the user should increase their
// ulimits to the largest possible value for dockerd).
var rlim unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil {
logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
return limit
}
softRlimit := int(rlim.Cur)
// Much fewer containers than RLIMIT_NOFILE. No need to adjust anything.
if softRlimit > overhead*n {
return limit
}
// RLIMIT_NOFILE big enough, no need to adjust anything.
if softRlimit > overhead*limit {
return limit
}
logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
return softRlimit / overhead
}
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
// Docker 1.11 and above doesn't actually run on kernels older than 3.4,
// due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4).
if !kernel.CheckKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
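// Illustrative example: Memory=512MiB yields MemorySwap=1GiB, i.e. the container
// gets 512MiB of memory plus 512MiB of swap (MemorySwap is the combined total).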
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = config.DefaultShmSize
if daemon.configStore != nil {
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
}
}
// Set default IPC mode, if unset for container
if hostConfig.IpcMode.IsEmpty() {
m := config.DefaultIpcMode
if daemon.configStore != nil {
m = daemon.configStore.IpcMode
}
hostConfig.IpcMode = containertypes.IpcMode(m)
}
// Set default cgroup namespace mode, if unset for container
if hostConfig.CgroupnsMode.IsEmpty() {
// for cgroup v2: unshare cgroupns even for privileged containers
// https://github.com/containers/libpod/pull/4374#issuecomment-549776387
if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified {
hostConfig.CgroupnsMode = containertypes.CgroupnsMode("host")
} else {
m := "host"
if cgroups.Mode() == cgroups.Unified {
m = "private"
}
if daemon.configStore != nil {
m = daemon.configStore.CgroupNamespaceMode
}
hostConfig.CgroupnsMode = containertypes.CgroupnsMode(m)
}
}
adaptSharedNamespaceContainer(daemon, hostConfig)
var err error
secOpts, err := daemon.generateSecurityOpt(hostConfig)
if err != nil {
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
// adaptSharedNamespaceContainer replaces a container name with its ID in hostConfig.
// More precisely, it rewrites `container:name` to `container:ID` for PidMode, IpcMode
// and NetworkMode.
//
// When a container shares its namespace with another container, using the ID keeps the
// namespace-sharing connection between the two containers intact even if the other
// container is renamed.
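//
// Illustrative example: a PidMode of "container:web" is rewritten to
// "container:<ID of web>", so the reference keeps working if "web" is renamed.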
func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
containerPrefix := "container:"
if hostConfig.PidMode.IsContainer() {
pidContainer := hostConfig.PidMode.Container()
// if there is any error returned here, we just ignore it and leave it to be
// handled in the following logic
if c, err := daemon.GetContainer(pidContainer); err == nil {
hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
}
}
if hostConfig.IpcMode.IsContainer() {
ipcContainer := hostConfig.IpcMode.Container()
if c, err := daemon.GetContainer(ipcContainer); err == nil {
hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
}
}
if hostConfig.NetworkMode.IsContainer() {
netContainer := hostConfig.NetworkMode.ConnectedContainer()
if c, err := daemon.GetContainer(netContainer); err == nil {
hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
}
}
}
// verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration
func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) {
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB")
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage")
}
if resources.KernelMemory > 0 {
// Kernel memory limit is not supported on cgroup v2.
// Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4.
// https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7
warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 {
warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.")
}
if resources.PidsLimit != nil && !sysInfo.PidsLimit {
if *resources.PidsLimit > 0 {
warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
}
resources.PidsLimit = nil
}
// cpu subsystem checks and adjustments
if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUQuota > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set")
}
if resources.NanoCPUs > 0 && !sysInfo.CPUCfs {
return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted")
}
// The highest precision we could get on Linux is 0.001, by setting
// cpu.cfs_period_us=1000ms
// cpu.cfs_quota=1ms
// See the following link for details:
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
// Here we don't set the lower limit; it is up to the underlying platform (e.g., Linux) to return an error.
// The error message uses 0.01 so that it is consistent with Windows.
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
resources.CPUShares = 0
}
if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs {
warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.")
resources.CPUPeriod = 0
resources.CPUQuota = 0
}
if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) {
return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
}
if resources.CPUQuota > 0 && resources.CPUQuota < 1000 {
return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)")
}
if resources.CPUPercent > 0 {
warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS))
resources.CPUPercent = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000")
}
if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 {
return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS)
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
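// getCgroupDriver returns the effective cgroup driver: "systemd" when
// native.cgroupdriver=systemd is configured (or implied on a cgroup v2 host booted
// with systemd), "none" for rootless daemons, and "cgroupfs" otherwise.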
func (daemon *Daemon) getCgroupDriver() string {
if UsingSystemd(daemon.configStore) {
return cgroupSystemdDriver
}
if daemon.Rootless() {
return cgroupNoneDriver
}
return cgroupFsDriver
}
// getCD gets the raw value of the native.cgroupdriver option, if set.
func getCD(config *config.Config) string {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
return val
}
return ""
}
// verifyCgroupDriver validates native.cgroupdriver
func verifyCgroupDriver(config *config.Config) error {
cd := getCD(config)
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
if cd == cgroupNoneDriver {
return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
func UsingSystemd(config *config.Config) bool {
if getCD(config) == cgroupSystemdDriver {
return true
}
// On cgroup v2 hosts, default to systemd driver
if getCD(config) == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() {
return true
}
return false
}
var (
runningSystemd bool
detectSystemd sync.Once
)
// isRunningSystemd checks whether the host was booted with systemd as its init
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
// checks whether /run/systemd/system/ exists and is a directory.
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
//
// NOTE: This function comes from package github.com/coreos/go-systemd/util
// It was borrowed here to avoid a dependency on cgo.
func isRunningSystemd() bool {
detectSystemd.Do(func() {
fi, err := os.Lstat("/run/systemd/system")
if err != nil {
return
}
runningSystemd = fi.IsDir()
})
return runningSystemd
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
sysInfo := daemon.RawSysInfo(true)
w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update)
// regardless of whether err is nil, w may already contain warnings.
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size can not be less than 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
}
// ip-forwarding does not affect containers with '--net=host' (or '--net=none')
if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
}
if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 {
warnings = append(warnings, "Published ports are discarded when using host network mode")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
if hostConfig.Privileged {
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
}
if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if hostConfig.Runtime == "" {
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
}
if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
}
parser := volumemounts.NewParser(runtime.GOOS)
for dest := range hostConfig.Tmpfs {
if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
return warnings, err
}
}
if !hostConfig.CgroupnsMode.Valid() {
return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode)
}
if hostConfig.CgroupnsMode.IsPrivate() {
if !sysInfo.CgroupNamespaces {
warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.")
}
}
if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) {
warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return warnings, nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(conf *config.Config) error {
if conf.ContainerdNamespace == conf.ContainerdPluginNamespace {
return errors.New("containers namespace and plugins namespace cannot be the same")
}
// Check for mutually incompatible config options
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental {
return fmt.Errorf("ip6tables rules are only available if experimental features are enabled")
}
if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq {
conf.BridgeConfig.EnableIPMasq = false
}
if err := verifyCgroupDriver(conf); err != nil {
return err
}
if conf.CgroupParent != "" && UsingSystemd(conf) {
if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified {
return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode")
}
configureRuntimes(conf)
if rtName := conf.GetDefaultRuntimeName(); rtName != "" {
if conf.GetRuntime(rtName) == nil {
return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
}
if rtName == config.LinuxV1RuntimeName {
logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)
}
}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *config.Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
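// overlaySupportsSelinux scans /proc/kallsyms for the security_inode_copy_up
// symbol; its presence indicates the kernel can propagate SELinux labels on
// overlay copy-up.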
func overlaySupportsSelinux() (bool, error) {
f, err := os.Open("/proc/kallsyms")
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.HasSuffix(s.Text(), " security_inode_copy_up") {
return true, nil
}
}
return false, s.Err()
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
if config.EnableSelinuxSupport {
if !selinux.GetEnabled() {
logrus.Warn("Docker could not enable SELinux on the host system")
return nil
}
if driverName == "overlay" || driverName == "overlay2" {
// If driver is overlay or overlay2, make sure kernel
// supports selinux with overlay.
supported, err := overlaySupportsSelinux()
if err != nil {
return err
}
if !supported {
logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
}
}
} else {
selinux.SetDisabled()
}
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
logrus.Info("There are old running containers, the network config will not take affect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
removeDefaultBridgeInterface()
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
// Set HostGatewayIP to the default bridge's IP if it is empty
if daemon.configStore.HostGatewayIP == nil && controller != nil {
if n, err := controller.NetworkByName("bridge"); err == nil {
v4Info, v6Info := n.Info().IpamInfo()
var gateway net.IP
if len(v4Info) > 0 {
gateway = v4Info[0].Gateway.IP
} else if len(v6Info) > 0 {
gateway = v6Info[0].Gateway.IP
}
daemon.configStore.HostGatewayIP = gateway
}
}
return controller, nil
}
func driverOptions(config *config.Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.BridgeConfig.EnableIPForward,
"EnableIPTables": config.BridgeConfig.EnableIPTables,
"EnableIP6Tables": config.BridgeConfig.EnableIP6Tables,
"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
"UserlandProxyPath": config.BridgeConfig.UserlandProxyPath}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.PreferredPool = ipNet.String()
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var (
deferIPv6Alloc bool
ipamV6Conf *libnetwork.IpamConf
)
if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" {
return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6"))
} else if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
ipamV6Conf = &libnetwork.IpamConf{
AuxAddresses: make(map[string]string),
PreferredPool: fCIDRv6.String(),
}
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// Remove default bridge interface if present (--bridge=none use case)
func removeDefaultBridgeInterface() {
if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
if err := netlink.LinkDel(lnk); err != nil {
logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
}
}
}
func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error {
return func(initPath containerfs.ContainerFS) error {
return initlayer.Setup(initPath, idMapping.RootPair())
}
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
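//
// Illustrative examples: "default", "1000", "1000:1000", "someuser", "someuser:somegroup".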
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := idtools.LookupUID(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := idtools.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := idtools.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
if _, err := idtools.LookupGroup(idparts[1]); err != nil {
return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
}
groupname = idparts[1]
}
}
return username, groupname, nil
}
func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, err
}
if username == "root" {
// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
// effectively
logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return &idtools.IdentityMapping{}, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
mappings, err := idtools.NewIdentityMapping(username)
if err != nil {
return nil, errors.Wrap(err, "Can't create ID mappings")
}
return mappings, nil
}
return &idtools.IdentityMapping{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root currently exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0711); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0711 with root:root ownership
if err := os.MkdirAll(rootDir, 0711); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
id := idtools.CurrentIdentity()
// First make sure the current root dir has the correct perms.
if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root)
}
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID))
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
// the graphroot won't block access to remapped root--if any pre-existing directory
// has strict permissions that don't allow "x", container start will fail, so
// better to warn and fail now
dirPath := config.Root
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if !idtools.CanAccess(dirPath, remappedRoot) {
return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
}
}
}
if err := setupDaemonRootPropagation(config); err != nil {
logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
}
return nil
}
func setupDaemonRootPropagation(cfg *config.Config) error {
rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
if err != nil {
return errors.Wrap(err, "error getting daemon root's parent mount")
}
var cleanupOldFile bool
cleanupFile := getUnmountOnShutdownPath(cfg)
defer func() {
if !cleanupOldFile {
return
}
if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
}
}()
if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
cleanupOldFile = true
return nil
}
if err := mount.MakeShared(cfg.Root); err != nil {
return errors.Wrap(err, "could not setup daemon root propagation to shared")
}
// check the case where this may have already been a mount to itself.
// If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root {
cleanupOldFile = true
return nil
}
if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil {
return errors.Wrap(err, "error creating dir to store mount cleanup file")
}
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
}
return nil
}
// getUnmountOnShutdownPath generates the path used when writing the file that signals to the daemon
// that the daemon root should be unmounted on shutdown.
func getUnmountOnShutdownPath(config *config.Config) string {
return filepath.Join(config.ExecRoot, "unmount-on-shutdown")
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
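// Each legacy link is in "name:alias" form (a bare "name" reuses the name as
// the alias); resolve the named container and register the alias against it.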
name, alias, err := opts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
_, err := container.WriteHostConfig()
return err
}
// conditionalMountOnStart is a platform specific helper function called during
// container start to mount the container.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
return daemon.Unmount(container)
}
func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
cs, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
s := &types.StatsJSON{}
s.Read = cs.Read
stats := cs.Metrics
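// containerd reports either cgroup v1 or cgroup v2 metrics depending on how the
// host's cgroups are mounted; dispatch on the concrete metrics type accordingly.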
switch t := stats.(type) {
case *statsV1.Metrics:
return daemon.statsV1(s, t)
case *statsV2.Metrics:
return daemon.statsV2(s, t)
default:
return nil, errors.Errorf("unexpected type of metrics %+v", t)
}
}
func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) {
if stats.Blkio != nil {
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive),
SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive),
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.Usage.Total,
PercpuUsage: stats.CPU.Usage.PerCPU,
UsageInKernelmode: stats.CPU.Usage.Kernel,
UsageInUsermode: stats.CPU.Usage.User,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.Throttling.Periods,
ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
ThrottledTime: stats.CPU.Throttling.ThrottledTime,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["cache"] = stats.Memory.Cache
raw["rss"] = stats.Memory.RSS
raw["rss_huge"] = stats.Memory.RSSHuge
raw["mapped_file"] = stats.Memory.MappedFile
raw["dirty"] = stats.Memory.Dirty
raw["writeback"] = stats.Memory.Writeback
raw["pgpgin"] = stats.Memory.PgPgIn
raw["pgpgout"] = stats.Memory.PgPgOut
raw["pgfault"] = stats.Memory.PgFault
raw["pgmajfault"] = stats.Memory.PgMajFault
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
raw["total_cache"] = stats.Memory.TotalCache
raw["total_rss"] = stats.Memory.TotalRSS
raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
raw["total_mapped_file"] = stats.Memory.TotalMappedFile
raw["total_dirty"] = stats.Memory.TotalDirty
raw["total_writeback"] = stats.Memory.TotalWriteback
raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
raw["total_pgfault"] = stats.Memory.TotalPgFault
raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
raw["total_active_anon"] = stats.Memory.TotalActiveAnon
raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
raw["total_active_file"] = stats.Memory.TotalActiveFile
raw["total_unevictable"] = stats.Memory.TotalUnevictable
if stats.Memory.Usage != nil {
s.MemoryStats = types.MemoryStats{
Stats: raw,
Usage: stats.Memory.Usage.Usage,
MaxUsage: stats.Memory.Usage.Max,
Limit: stats.Memory.Usage.Limit,
Failcnt: stats.Memory.Usage.Failcnt,
}
} else {
s.MemoryStats = types.MemoryStats{
Stats: raw,
}
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) {
if stats.Io != nil {
var isbr []types.BlkioStatEntry
for _, re := range stats.Io.Usage {
isbr = append(isbr,
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "read",
Value: re.Rbytes,
},
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "write",
Value: re.Wbytes,
},
)
}
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: isbr,
// Other fields are unsupported
}
}
if stats.CPU != nil {
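// cgroup v2 reports CPU times in microseconds, while the Docker stats API uses
// nanoseconds, hence the *1000 conversions below.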
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.UsageUsec * 1000,
// PercpuUsage is not supported
UsageInKernelmode: stats.CPU.SystemUsec * 1000,
UsageInUsermode: stats.CPU.UserUsec * 1000,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.NrPeriods,
ThrottledPeriods: stats.CPU.NrThrottled,
ThrottledTime: stats.CPU.ThrottledUsec * 1000,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["anon"] = stats.Memory.Anon
raw["file"] = stats.Memory.File
raw["kernel_stack"] = stats.Memory.KernelStack
raw["slab"] = stats.Memory.Slab
raw["sock"] = stats.Memory.Sock
raw["shmem"] = stats.Memory.Shmem
raw["file_mapped"] = stats.Memory.FileMapped
raw["file_dirty"] = stats.Memory.FileDirty
raw["file_writeback"] = stats.Memory.FileWriteback
raw["anon_thp"] = stats.Memory.AnonThp
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["slab_reclaimable"] = stats.Memory.SlabReclaimable
raw["slab_unreclaimable"] = stats.Memory.SlabUnreclaimable
raw["pgfault"] = stats.Memory.Pgfault
raw["pgmajfault"] = stats.Memory.Pgmajfault
raw["workingset_refault"] = stats.Memory.WorkingsetRefault
raw["workingset_activate"] = stats.Memory.WorkingsetActivate
raw["workingset_nodereclaim"] = stats.Memory.WorkingsetNodereclaim
raw["pgrefill"] = stats.Memory.Pgrefill
raw["pgscan"] = stats.Memory.Pgscan
raw["pgsteal"] = stats.Memory.Pgsteal
raw["pgactivate"] = stats.Memory.Pgactivate
raw["pgdeactivate"] = stats.Memory.Pgdeactivate
raw["pglazyfree"] = stats.Memory.Pglazyfree
raw["pglazyfreed"] = stats.Memory.Pglazyfreed
raw["thp_fault_alloc"] = stats.Memory.ThpFaultAlloc
raw["thp_collapse_alloc"] = stats.Memory.ThpCollapseAlloc
s.MemoryStats = types.MemoryStats{
// Stats is not compatible with v1
Stats: raw,
Usage: stats.Memory.Usage,
// MaxUsage is not supported
Limit: stats.Memory.UsageLimit,
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
if stats.MemoryEvents != nil {
// Failcnt is set to the "oom" field of the "memory.events" file.
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
s.MemoryStats.Failcnt = stats.MemoryEvents.Oom
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows
func (daemon *Daemon) setDefaultIsolation() error {
return nil
}
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
// setup the daemons oom_score_adj
if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
return err
}
if err := setMayDetachMounts(); err != nil {
logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
}
return nil
}
// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels as it is not configurable there;
// the equivalent behavior has been built into upstream kernels since 3.15.
func setMayDetachMounts() error {
f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
}
defer f.Close()
_, err = f.WriteString("1")
if os.IsPermission(err) {
// Setting may_detach_mounts does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !sys.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
}
return nil
}
return err
}
func setupOOMScoreAdj(score int) error {
if score == 0 {
return nil
}
f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
stringScore := strconv.Itoa(score)
_, err = f.WriteString(stringScore)
if os.IsPermission(err) {
// Setting oom_score_adj does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !sys.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
}
return nil
}
return err
}
func (daemon *Daemon) initCPURtController(mnt, path string) error {
if path == "/" || path == "." {
return nil
}
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
// for the period and runtime as this limits what the children can be set to.
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil {
return err
}
path = filepath.Join(mnt, path)
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
return err
}
return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}
func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error {
if configValue == 0 {
return nil
}
return ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700)
}
func (daemon *Daemon) setupSeccompProfile() error {
if daemon.configStore.SeccompProfile != "" {
daemon.seccompProfilePath = daemon.configStore.SeccompProfile
b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
if err != nil {
return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
}
daemon.seccompProfile = b
}
return nil
}
// RawSysInfo returns *sysinfo.SysInfo.
func (daemon *Daemon) RawSysInfo(quiet bool) *sysinfo.SysInfo {
var opts []sysinfo.Opt
if daemon.getCgroupDriver() == cgroupSystemdDriver {
rootlesskitParentEUID := os.Getenv("ROOTLESSKIT_PARENT_EUID")
if rootlesskitParentEUID != "" {
groupPath := fmt.Sprintf("/user.slice/user-%s.slice", rootlesskitParentEUID)
opts = append(opts, sysinfo.WithCgroup2GroupPath(groupPath))
}
}
return sysinfo.New(quiet, opts...)
}
func recursiveUnmount(target string) error {
return mount.RecursiveUnmount(target)
}
| // +build linux freebsd
package daemon // import "github.com/docker/docker/daemon"
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/cgroups"
statsV1 "github.com/containerd/cgroups/stats/v1"
statsV2 "github.com/containerd/cgroups/v2/stats"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/blkiodev"
pblkiodev "github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
nwconfig "github.com/docker/docker/libnetwork/config"
"github.com/docker/docker/libnetwork/drivers/bridge"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/netutils"
"github.com/docker/docker/libnetwork/options"
lntypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/runconfig"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/moby/sys/mount"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
isWindows = false
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not a kernel limit; we want this 6M limit to account for overhead during startup, and to supply a reasonable functional container
linuxMinMemory = 6291456
// constants for remapped root settings
defaultIDSpecifier = "default"
defaultRemappedID = "dockremap"
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
cgroupNoneDriver = "none"
)
type containerGetter interface {
GetContainer(string) (*container.Container, error)
}
func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
memory := specs.LinuxMemory{}
if config.Memory > 0 {
memory.Limit = &config.Memory
}
if config.MemoryReservation > 0 {
memory.Reservation = &config.MemoryReservation
}
if config.MemorySwap > 0 {
memory.Swap = &config.MemorySwap
}
if config.MemorySwappiness != nil {
swappiness := uint64(*config.MemorySwappiness)
memory.Swappiness = &swappiness
}
if config.OomKillDisable != nil {
memory.DisableOOMKiller = config.OomKillDisable
}
if config.KernelMemory != 0 {
memory.Kernel = &config.KernelMemory
}
if config.KernelMemoryTCP != 0 {
memory.KernelTCP = &config.KernelMemoryTCP
}
return &memory
}
func getPidsLimit(config containertypes.Resources) *specs.LinuxPids {
if config.PidsLimit == nil {
return nil
}
if *config.PidsLimit <= 0 {
// docker API allows 0 and negative values to unset this to be consistent
// with default values. When updating values, runc requires -1 to unset
// the previous limit.
return &specs.LinuxPids{Limit: -1}
}
return &specs.LinuxPids{Limit: *config.PidsLimit}
}
func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
cpu := specs.LinuxCPU{}
if config.CPUShares < 0 {
return nil, fmt.Errorf("shares: invalid argument")
}
if config.CPUShares >= 0 {
shares := uint64(config.CPUShares)
cpu.Shares = &shares
}
if config.CpusetCpus != "" {
cpu.Cpus = config.CpusetCpus
}
if config.CpusetMems != "" {
cpu.Mems = config.CpusetMems
}
if config.NanoCPUs > 0 {
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
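// NanoCPUs expresses CPUs in billionths of a CPU and is translated into a CFS
// period/quota pair. Illustrative example: NanoCPUs=500000000 (0.5 CPUs) yields
// period=100000 and quota=50000 (microseconds).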
period := uint64(100 * time.Millisecond / time.Microsecond)
quota := config.NanoCPUs * int64(period) / 1e9
cpu.Period = &period
cpu.Quota = &quota
}
if config.CPUPeriod != 0 {
period := uint64(config.CPUPeriod)
cpu.Period = &period
}
if config.CPUQuota != 0 {
q := config.CPUQuota
cpu.Quota = &q
}
if config.CPURealtimePeriod != 0 {
period := uint64(config.CPURealtimePeriod)
cpu.RealtimePeriod = &period
}
if config.CPURealtimeRuntime != 0 {
c := config.CPURealtimeRuntime
cpu.RealtimeRuntime = &c
}
return &cpu, nil
}
func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
var stat unix.Stat_t
var blkioWeightDevices []specs.LinuxWeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := unix.Stat(weightDevice.Path, &stat); err != nil {
return nil, errors.WithStack(&os.PathError{Op: "stat", Path: weightDevice.Path, Err: err})
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
// The type is 32bit on mips.
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
blkioWeightDevices = append(blkioWeightDevices, d)
}
return blkioWeightDevices, nil
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
return parseSecurityOpt(container, hostConfig)
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
if opt == "no-new-privileges" {
container.NoNewPrivileges = true
continue
}
if opt == "disable" {
labelOpts = append(labelOpts, "disable")
continue
}
var con []string
if strings.Contains(opt, "=") {
con = strings.SplitN(opt, "=", 2)
} else if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
}
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
case "no-new-privileges":
noNewPrivileges, err := strconv.ParseBool(con[1])
if err != nil {
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
container.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
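// Illustrative sketch, not part of the original daemon source: how a typical
// --security-opt list is handled by parseSecurityOpt above. "no-new-privileges"
// toggles the flag directly, while "seccomp=..." and "apparmor=..." fill the
// corresponding profile fields; the concrete values used here are assumptions.
func exampleParseSecurityOpt() {
	c := &container.Container{}
	hc := &containertypes.HostConfig{
		SecurityOpt: []string{"no-new-privileges", "seccomp=unconfined", "apparmor=docker-default"},
	}
	if err := parseSecurityOpt(c, hc); err == nil {
		fmt.Println(c.NoNewPrivileges, c.SeccompProfile, c.AppArmorProfile) // true unconfined docker-default
	}
}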
func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
var throttleDevices []specs.LinuxThrottleDevice
var stat unix.Stat_t
for _, d := range devs {
if err := unix.Stat(d.Path, &stat); err != nil {
return nil, errors.WithStack(&os.PathError{Op: "stat", Path: d.Path, Err: err})
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
// the type is 32bit on mips
d.Major = int64(unix.Major(uint64(stat.Rdev))) // nolint: unconvert
d.Minor = int64(unix.Minor(uint64(stat.Rdev))) // nolint: unconvert
throttleDevices = append(throttleDevices, d)
}
return throttleDevices, nil
}
// adjustParallelLimit takes a number of objects and a proposed limit and
// figures out if it's reasonable (and adjusts it accordingly). This is only
// used for daemon startup, which does a lot of parallel loading of containers
// (and if we exceed RLIMIT_NOFILE then we're in trouble).
func adjustParallelLimit(n int, limit int) int {
// Rule-of-thumb overhead factor (how many files will each goroutine open
// simultaneously). Yes, this is ugly but to be frank this whole thing is
// ugly.
const overhead = 2
// On Linux, we need to ensure that parallelStartupJobs doesn't cause us to
// exceed RLIMIT_NOFILE. If parallelStartupJobs is too large, we reduce it
// and give a warning (since in theory the user should increase their
// ulimits to the largest possible value for dockerd).
var rlim unix.Rlimit
if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil {
logrus.Warnf("Couldn't find dockerd's RLIMIT_NOFILE to double-check startup parallelism factor: %v", err)
return limit
}
softRlimit := int(rlim.Cur)
	// Far fewer containers than RLIMIT_NOFILE; no need to adjust anything.
if softRlimit > overhead*n {
return limit
}
// RLIMIT_NOFILE big enough, no need to adjust anything.
if softRlimit > overhead*limit {
return limit
}
logrus.Warnf("Found dockerd's open file ulimit (%v) is far too small -- consider increasing it significantly (at least %v)", softRlimit, overhead*limit)
return softRlimit / overhead
}
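// Illustrative sketch, not part of the original daemon source: the clamping
// rule above written out with a hypothetical soft RLIMIT_NOFILE value instead
// of the one read from the kernel. With overhead=2, restoring 10000 containers
// with a proposed limit of 10000 under a soft limit of 1024 collapses to
// 1024/2 = 512 parallel jobs.
func exampleParallelClamp(n, limit, softRlimit int) int {
	const overhead = 2
	if softRlimit > overhead*n || softRlimit > overhead*limit {
		return limit
	}
	return softRlimit / overhead
}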
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
// Docker 1.11 and above doesn't actually run on kernels older than 3.4,
// due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4).
if !kernel.CheckKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = config.DefaultShmSize
if daemon.configStore != nil {
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
}
}
// Set default IPC mode, if unset for container
if hostConfig.IpcMode.IsEmpty() {
m := config.DefaultIpcMode
if daemon.configStore != nil {
m = daemon.configStore.IpcMode
}
hostConfig.IpcMode = containertypes.IpcMode(m)
}
// Set default cgroup namespace mode, if unset for container
if hostConfig.CgroupnsMode.IsEmpty() {
// for cgroup v2: unshare cgroupns even for privileged containers
// https://github.com/containers/libpod/pull/4374#issuecomment-549776387
if hostConfig.Privileged && cgroups.Mode() != cgroups.Unified {
hostConfig.CgroupnsMode = containertypes.CgroupnsMode("host")
} else {
m := "host"
if cgroups.Mode() == cgroups.Unified {
m = "private"
}
if daemon.configStore != nil {
m = daemon.configStore.CgroupNamespaceMode
}
hostConfig.CgroupnsMode = containertypes.CgroupnsMode(m)
}
}
adaptSharedNamespaceContainer(daemon, hostConfig)
var err error
secOpts, err := daemon.generateSecurityOpt(hostConfig)
if err != nil {
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, secOpts...)
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
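// Illustrative sketch, not part of the original daemon source: the MemorySwap
// default applied in adaptContainerSettings above. A container created with a
// 512MiB memory limit and no explicit swap setting ends up with a 1GiB
// combined memory+swap limit; the 512MiB figure is just an example value.
func exampleMemorySwapDefault() {
	hc := &containertypes.HostConfig{}
	hc.Memory = 512 * 1024 * 1024
	if hc.Memory > 0 && hc.MemorySwap == 0 {
		hc.MemorySwap = hc.Memory * 2
	}
	fmt.Println(hc.MemorySwap) // 1073741824
}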
// adaptSharedNamespaceContainer replaces a container name with its ID in hostConfig.
// More precisely, it rewrites `container:name` to `container:ID` for PidMode, IpcMode
// and NetworkMode.
//
// When a container shares its namespace with another container, using the ID keeps the
// namespace-sharing connection between the two containers intact even if the other
// container is renamed.
func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
containerPrefix := "container:"
if hostConfig.PidMode.IsContainer() {
pidContainer := hostConfig.PidMode.Container()
// if there is any error returned here, we just ignore it and leave it to be
// handled in the following logic
if c, err := daemon.GetContainer(pidContainer); err == nil {
hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
}
}
if hostConfig.IpcMode.IsContainer() {
ipcContainer := hostConfig.IpcMode.Container()
if c, err := daemon.GetContainer(ipcContainer); err == nil {
hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
}
}
if hostConfig.NetworkMode.IsContainer() {
netContainer := hostConfig.NetworkMode.ConnectedContainer()
if c, err := daemon.GetContainer(netContainer); err == nil {
hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
}
}
}
// verifyPlatformContainerResources performs platform-specific validation of the container's resource-configuration
func verifyPlatformContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) (warnings []string, err error) {
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 6MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory reservation allowed is 6MB")
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage")
}
if resources.KernelMemory > 0 {
// Kernel memory limit is not supported on cgroup v2.
// Even on cgroup v1, kernel memory limit (`kmem.limit_in_bytes`) has been deprecated since kernel 5.4.
// https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0b5adf44cae99b3ebcc7
warnings = append(warnings, "Specifying a kernel memory limit is deprecated and will be removed in a future release.")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.OomKillDisable != nil && *resources.OomKillDisable && resources.Memory == 0 {
warnings = append(warnings, "OOM killer is disabled for the container, but no memory limit is set, this can result in the system running out of resources.")
}
if resources.PidsLimit != nil && !sysInfo.PidsLimit {
if *resources.PidsLimit > 0 {
warnings = append(warnings, "Your kernel does not support PIDs limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
}
resources.PidsLimit = nil
}
// cpu subsystem checks and adjustments
if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUQuota > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set")
}
if resources.NanoCPUs > 0 && !sysInfo.CPUCfs {
return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU CFS scheduler or the cgroup is not mounted")
}
	// The highest precision we could get on Linux is 0.001, by setting
	//   cpu.cfs_period_us to its maximum of 1s  (i.e. 1000000)
	//   cpu.cfs_quota_us  to its minimum of 1ms (i.e. 1000)
	// See the following link for details:
	// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
	// Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error.
	// The error message uses 0.01 so that it is consistent with Windows.
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
resources.CPUShares = 0
}
if (resources.CPUPeriod != 0 || resources.CPUQuota != 0) && !sysInfo.CPUCfs {
warnings = append(warnings, "Your kernel does not support CPU CFS scheduler. CPU period/quota discarded.")
resources.CPUPeriod = 0
resources.CPUQuota = 0
}
if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) {
return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
}
if resources.CPUQuota > 0 && resources.CPUQuota < 1000 {
return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)")
}
if resources.CPUPercent > 0 {
warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS))
resources.CPUPercent = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset cpus", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, errors.Wrapf(err, "Invalid value %s for cpuset mems", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000")
}
if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 {
return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS)
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
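// Illustrative sketch, not part of the original daemon source: the swappiness
// bounds check performed in verifyPlatformContainerResources above, shown in
// isolation. Only values in [0, 100] are accepted; anything else is rejected
// before a cgroup file is ever written.
func exampleSwappinessCheck(swappiness int64) error {
	if swappiness < 0 || swappiness > 100 {
		return fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
	}
	return nil
}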
func (daemon *Daemon) getCgroupDriver() string {
if UsingSystemd(daemon.configStore) {
return cgroupSystemdDriver
}
if daemon.Rootless() {
return cgroupNoneDriver
}
return cgroupFsDriver
}
// getCD gets the raw value of the native.cgroupdriver option, if set.
func getCD(config *config.Config) string {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
return val
}
return ""
}
// verifyCgroupDriver validates native.cgroupdriver
func verifyCgroupDriver(config *config.Config) error {
cd := getCD(config)
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
if cd == cgroupNoneDriver {
return fmt.Errorf("native.cgroupdriver option %s is internally used and cannot be specified manually", cd)
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
func UsingSystemd(config *config.Config) bool {
if getCD(config) == cgroupSystemdDriver {
return true
}
// On cgroup v2 hosts, default to systemd driver
if getCD(config) == "" && cgroups.Mode() == cgroups.Unified && isRunningSystemd() {
return true
}
return false
}
var (
runningSystemd bool
detectSystemd sync.Once
)
// isRunningSystemd checks whether the host was booted with systemd as its init
// system. This functions similarly to systemd's `sd_booted(3)`: internally, it
// checks whether /run/systemd/system/ exists and is a directory.
// http://www.freedesktop.org/software/systemd/man/sd_booted.html
//
// NOTE: This function comes from package github.com/coreos/go-systemd/util
// It was borrowed here to avoid a dependency on cgo.
func isRunningSystemd() bool {
detectSystemd.Do(func() {
fi, err := os.Lstat("/run/systemd/system")
if err != nil {
return
}
runningSystemd = fi.IsDir()
})
return runningSystemd
}
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, update bool) (warnings []string, err error) {
if hostConfig == nil {
return nil, nil
}
sysInfo := daemon.RawSysInfo(true)
w, err := verifyPlatformContainerResources(&hostConfig.Resources, sysInfo, update)
	// w may contain warnings regardless of whether err is nil.
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size can not be less than 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
}
// ip-forwarding does not affect container with '--net=host' (or '--net=none')
if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
}
if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 {
warnings = append(warnings, "Published ports are discarded when using host network mode")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
if hostConfig.Privileged {
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
}
if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if hostConfig.Runtime == "" {
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
}
if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
}
parser := volumemounts.NewParser(runtime.GOOS)
for dest := range hostConfig.Tmpfs {
if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
return warnings, err
}
}
if !hostConfig.CgroupnsMode.Valid() {
return warnings, fmt.Errorf("invalid cgroup namespace mode: %v", hostConfig.CgroupnsMode)
}
if hostConfig.CgroupnsMode.IsPrivate() {
if !sysInfo.CgroupNamespaces {
warnings = append(warnings, "Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.")
}
}
if hostConfig.Runtime == config.LinuxV1RuntimeName || (hostConfig.Runtime == "" && daemon.configStore.DefaultRuntime == config.LinuxV1RuntimeName) {
warnings = append(warnings, fmt.Sprintf("Configured runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName))
}
return warnings, nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(conf *config.Config) error {
if conf.ContainerdNamespace == conf.ContainerdPluginNamespace {
return errors.New("containers namespace and plugins namespace cannot be the same")
}
// Check for mutually incompatible config options
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if conf.BridgeConfig.EnableIP6Tables && !conf.Experimental {
return fmt.Errorf("ip6tables rules are only available if experimental features are enabled")
}
if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq {
conf.BridgeConfig.EnableIPMasq = false
}
if err := verifyCgroupDriver(conf); err != nil {
return err
}
if conf.CgroupParent != "" && UsingSystemd(conf) {
if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if conf.Rootless && UsingSystemd(conf) && cgroups.Mode() != cgroups.Unified {
return fmt.Errorf("exec-opt native.cgroupdriver=systemd requires cgroup v2 for rootless mode")
}
configureRuntimes(conf)
if rtName := conf.GetDefaultRuntimeName(); rtName != "" {
if conf.GetRuntime(rtName) == nil {
return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
}
if rtName == config.LinuxV1RuntimeName {
logrus.Warnf("Configured default runtime %q is deprecated and will be removed in the next release.", config.LinuxV1RuntimeName)
}
}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *config.Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
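// Illustrative sketch, not part of the original daemon source: the 90%
// calculation above uses integer division, so the kernel value is truncated
// to hundreds first. A threads-max of 63022 (an arbitrary example value)
// yields (63022/100)*90 = 56700 as the Go runtime's thread limit.
func exampleMaxThreads(threadsMax int) int {
	return (threadsMax / 100) * 90
}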
func overlaySupportsSelinux() (bool, error) {
f, err := os.Open("/proc/kallsyms")
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if strings.HasSuffix(s.Text(), " security_inode_copy_up") {
return true, nil
}
}
return false, s.Err()
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
if config.EnableSelinuxSupport {
if !selinux.GetEnabled() {
logrus.Warn("Docker could not enable SELinux on the host system")
return nil
}
if driverName == "overlay" || driverName == "overlay2" {
// If driver is overlay or overlay2, make sure kernel
// supports selinux with overlay.
supported, err := overlaySupportsSelinux()
if err != nil {
return err
}
if !supported {
logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
}
}
} else {
selinux.SetDisabled()
}
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
		logrus.Info("There are old running containers, the network config will not take effect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
removeDefaultBridgeInterface()
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
// Set HostGatewayIP to the default bridge's IP if it is empty
if daemon.configStore.HostGatewayIP == nil && controller != nil {
if n, err := controller.NetworkByName("bridge"); err == nil {
v4Info, v6Info := n.Info().IpamInfo()
var gateway net.IP
if len(v4Info) > 0 {
gateway = v4Info[0].Gateway.IP
} else if len(v6Info) > 0 {
gateway = v6Info[0].Gateway.IP
}
daemon.configStore.HostGatewayIP = gateway
}
}
return controller, nil
}
func driverOptions(config *config.Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.BridgeConfig.EnableIPForward,
"EnableIPTables": config.BridgeConfig.EnableIPTables,
"EnableIP6Tables": config.BridgeConfig.EnableIP6Tables,
"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
"UserlandProxyPath": config.BridgeConfig.UserlandProxyPath}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
ipamV4Conf := &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ip, ipNet, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.PreferredPool = ipNet.String()
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var (
deferIPv6Alloc bool
ipamV6Conf *libnetwork.IpamConf
)
if config.BridgeConfig.EnableIPv6 && config.BridgeConfig.FixedCIDRv6 == "" {
return errdefs.InvalidParameter(errors.New("IPv6 is enabled for the default bridge, but no subnet is configured. Specify an IPv6 subnet using --fixed-cidr-v6"))
} else if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
ipamV6Conf = &libnetwork.IpamConf{
AuxAddresses: make(map[string]string),
PreferredPool: fCIDRv6.String(),
}
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// Remove default bridge interface if present (--bridge=none use case)
func removeDefaultBridgeInterface() {
if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
if err := netlink.LinkDel(lnk); err != nil {
logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
}
}
}
func setupInitLayer(idMapping *idtools.IdentityMapping) func(containerfs.ContainerFS) error {
return func(initPath containerfs.ContainerFS) error {
return initlayer.Setup(initPath, idMapping.RootPair())
}
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := idtools.LookupUID(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := idtools.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := idtools.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
				return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
if _, err := idtools.LookupGroup(idparts[1]); err != nil {
return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
}
groupname = idparts[1]
}
}
return username, groupname, nil
}
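// Illustrative sketch, not part of the original daemon source: the
// --userns-remap spellings accepted by parseRemappedRoot above. Whether each
// call succeeds depends on the host's /etc/passwd, /etc/group and
// /etc/sub{uid,gid} contents; the user and group names used here are assumptions.
func exampleRemappedRootSpellings() {
	for _, spec := range []string{"default", "someuser", "someuser:somegroup", "1000:1000"} {
		if user, group, err := parseRemappedRoot(spec); err == nil {
			fmt.Printf("%s -> %s:%s\n", spec, user, group)
		}
	}
}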
func setupRemappedRoot(config *config.Config) (*idtools.IdentityMapping, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, err
}
if username == "root" {
// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
// effectively
logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return &idtools.IdentityMapping{}, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s", username)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
mappings, err := idtools.NewIdentityMapping(username)
if err != nil {
return nil, errors.Wrap(err, "Can't create ID mappings")
}
return mappings, nil
}
return &idtools.IdentityMapping{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, remappedRoot idtools.Identity) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root current exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0711); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0711 with root:root ownership
if err := os.MkdirAll(rootDir, 0711); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
id := idtools.CurrentIdentity()
// First make sure the current root dir has the correct perms.
if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
return errors.Wrapf(err, "could not create or set daemon root permissions: %s", config.Root)
}
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", remappedRoot.UID, remappedRoot.GID))
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAndChown(config.Root, 0701, id); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
// the graphroot won't block access to remapped root--if any pre-existing directory
// has strict permissions that don't allow "x", container start will fail, so
// better to warn and fail now
dirPath := config.Root
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if !idtools.CanAccess(dirPath, remappedRoot) {
return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
}
}
}
if err := setupDaemonRootPropagation(config); err != nil {
logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
}
return nil
}
func setupDaemonRootPropagation(cfg *config.Config) error {
rootParentMount, mountOptions, err := getSourceMount(cfg.Root)
if err != nil {
return errors.Wrap(err, "error getting daemon root's parent mount")
}
var cleanupOldFile bool
cleanupFile := getUnmountOnShutdownPath(cfg)
defer func() {
if !cleanupOldFile {
return
}
if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
}
}()
if hasMountInfoOption(mountOptions, sharedPropagationOption, slavePropagationOption) {
cleanupOldFile = true
return nil
}
if err := mount.MakeShared(cfg.Root); err != nil {
return errors.Wrap(err, "could not setup daemon root propagation to shared")
}
// check the case where this may have already been a mount to itself.
// If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root {
cleanupOldFile = true
return nil
}
if err := os.MkdirAll(filepath.Dir(cleanupFile), 0700); err != nil {
return errors.Wrap(err, "error creating dir to store mount cleanup file")
}
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
}
return nil
}
// getUnmountOnShutdownPath generates the path of the file that, when present, signals to the
// daemon that the daemon root should be unmounted on shutdown.
func getUnmountOnShutdownPath(config *config.Config) string {
return filepath.Join(config.ExecRoot, "unmount-on-shutdown")
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
name, alias, err := opts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
if errdefs.IsNotFound(err) {
// Trying to link to a non-existing container is not valid, and
// should return an "invalid parameter" error. Returning a "not
// found" error here would make the client report the container's
// image could not be found (see moby/moby#39823)
err = errdefs.InvalidParameter(err)
}
return errors.Wrapf(err, "Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
_, err := container.WriteHostConfig()
return err
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
return daemon.Unmount(container)
}
func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
cs, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
s := &types.StatsJSON{}
s.Read = cs.Read
stats := cs.Metrics
switch t := stats.(type) {
case *statsV1.Metrics:
return daemon.statsV1(s, t)
case *statsV2.Metrics:
return daemon.statsV2(s, t)
default:
return nil, errors.Errorf("unexpected type of metrics %+v", t)
}
}
func (daemon *Daemon) statsV1(s *types.StatsJSON, stats *statsV1.Metrics) (*types.StatsJSON, error) {
if stats.Blkio != nil {
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive),
SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive),
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.Usage.Total,
PercpuUsage: stats.CPU.Usage.PerCPU,
UsageInKernelmode: stats.CPU.Usage.Kernel,
UsageInUsermode: stats.CPU.Usage.User,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.Throttling.Periods,
ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
ThrottledTime: stats.CPU.Throttling.ThrottledTime,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["cache"] = stats.Memory.Cache
raw["rss"] = stats.Memory.RSS
raw["rss_huge"] = stats.Memory.RSSHuge
raw["mapped_file"] = stats.Memory.MappedFile
raw["dirty"] = stats.Memory.Dirty
raw["writeback"] = stats.Memory.Writeback
raw["pgpgin"] = stats.Memory.PgPgIn
raw["pgpgout"] = stats.Memory.PgPgOut
raw["pgfault"] = stats.Memory.PgFault
raw["pgmajfault"] = stats.Memory.PgMajFault
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
raw["total_cache"] = stats.Memory.TotalCache
raw["total_rss"] = stats.Memory.TotalRSS
raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
raw["total_mapped_file"] = stats.Memory.TotalMappedFile
raw["total_dirty"] = stats.Memory.TotalDirty
raw["total_writeback"] = stats.Memory.TotalWriteback
raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
raw["total_pgfault"] = stats.Memory.TotalPgFault
raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
raw["total_active_anon"] = stats.Memory.TotalActiveAnon
raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
raw["total_active_file"] = stats.Memory.TotalActiveFile
raw["total_unevictable"] = stats.Memory.TotalUnevictable
if stats.Memory.Usage != nil {
s.MemoryStats = types.MemoryStats{
Stats: raw,
Usage: stats.Memory.Usage.Usage,
MaxUsage: stats.Memory.Usage.Max,
Limit: stats.Memory.Usage.Limit,
Failcnt: stats.Memory.Usage.Failcnt,
}
} else {
s.MemoryStats = types.MemoryStats{
Stats: raw,
}
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
func (daemon *Daemon) statsV2(s *types.StatsJSON, stats *statsV2.Metrics) (*types.StatsJSON, error) {
if stats.Io != nil {
var isbr []types.BlkioStatEntry
for _, re := range stats.Io.Usage {
isbr = append(isbr,
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "read",
Value: re.Rbytes,
},
types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: "write",
Value: re.Wbytes,
},
)
}
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: isbr,
// Other fields are unsupported
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.UsageUsec * 1000,
// PercpuUsage is not supported
UsageInKernelmode: stats.CPU.SystemUsec * 1000,
UsageInUsermode: stats.CPU.UserUsec * 1000,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.NrPeriods,
ThrottledPeriods: stats.CPU.NrThrottled,
ThrottledTime: stats.CPU.ThrottledUsec * 1000,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["anon"] = stats.Memory.Anon
raw["file"] = stats.Memory.File
raw["kernel_stack"] = stats.Memory.KernelStack
raw["slab"] = stats.Memory.Slab
raw["sock"] = stats.Memory.Sock
raw["shmem"] = stats.Memory.Shmem
raw["file_mapped"] = stats.Memory.FileMapped
raw["file_dirty"] = stats.Memory.FileDirty
raw["file_writeback"] = stats.Memory.FileWriteback
raw["anon_thp"] = stats.Memory.AnonThp
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["slab_reclaimable"] = stats.Memory.SlabReclaimable
raw["slab_unreclaimable"] = stats.Memory.SlabUnreclaimable
raw["pgfault"] = stats.Memory.Pgfault
raw["pgmajfault"] = stats.Memory.Pgmajfault
raw["workingset_refault"] = stats.Memory.WorkingsetRefault
raw["workingset_activate"] = stats.Memory.WorkingsetActivate
raw["workingset_nodereclaim"] = stats.Memory.WorkingsetNodereclaim
raw["pgrefill"] = stats.Memory.Pgrefill
raw["pgscan"] = stats.Memory.Pgscan
raw["pgsteal"] = stats.Memory.Pgsteal
raw["pgactivate"] = stats.Memory.Pgactivate
raw["pgdeactivate"] = stats.Memory.Pgdeactivate
raw["pglazyfree"] = stats.Memory.Pglazyfree
raw["pglazyfreed"] = stats.Memory.Pglazyfreed
raw["thp_fault_alloc"] = stats.Memory.ThpFaultAlloc
raw["thp_collapse_alloc"] = stats.Memory.ThpCollapseAlloc
s.MemoryStats = types.MemoryStats{
// Stats is not compatible with v1
Stats: raw,
Usage: stats.Memory.Usage,
// MaxUsage is not supported
Limit: stats.Memory.UsageLimit,
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
if stats.MemoryEvents != nil {
// Failcnt is set to the "oom" field of the "memory.events" file.
// See https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
s.MemoryStats.Failcnt = stats.MemoryEvents.Oom
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
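// Illustrative sketch, not part of the original daemon source: cgroup v2
// reports CPU times in microseconds while the stats API (and cgroup v1) use
// nanoseconds, which is why statsV2 above multiplies by 1000. For example,
// 2500 microseconds of usage surfaces as 2500000 nanoseconds in the API response.
func exampleUsecToNsec(usageUsec uint64) uint64 {
	return usageUsec * 1000
}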
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows
func (daemon *Daemon) setDefaultIsolation() error {
return nil
}
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
// setup the daemons oom_score_adj
if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
return err
}
if err := setMayDetachMounts(); err != nil {
logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
}
return nil
}
// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels, as it is not configurable there:
// the equivalent behavior has been enabled unconditionally upstream since 3.15.
func setMayDetachMounts() error {
f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
}
defer f.Close()
_, err = f.WriteString("1")
if os.IsPermission(err) {
// Setting may_detach_mounts does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !sys.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
}
return nil
}
return err
}
func setupOOMScoreAdj(score int) error {
if score == 0 {
return nil
}
f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
stringScore := strconv.Itoa(score)
_, err = f.WriteString(stringScore)
if os.IsPermission(err) {
// Setting oom_score_adj does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !sys.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
}
return nil
}
return err
}
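// Illustrative sketch, not part of the original daemon source: how the
// daemon's oom-score-adjust setting would be applied at startup. A score of 0
// is a no-op; any other value is written to /proc/self/oom_score_adj, and
// permission errors are tolerated inside user namespaces. The value -500 is
// only an assumption for the example; any value in [-1000, 1000] behaves the
// same way.
func exampleApplyOOMScore() {
	if err := setupOOMScoreAdj(-500); err != nil {
		logrus.WithError(err).Warn("could not adjust the daemon's oom_score_adj")
	}
}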
func (daemon *Daemon) initCPURtController(mnt, path string) error {
if path == "/" || path == "." {
return nil
}
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
// for the period and runtime as this limits what the children can be set to.
if err := daemon.initCPURtController(mnt, filepath.Dir(path)); err != nil {
return err
}
path = filepath.Join(mnt, path)
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if err := maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
return err
}
return maybeCreateCPURealTimeFile(daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}
func maybeCreateCPURealTimeFile(configValue int64, file string, path string) error {
if configValue == 0 {
return nil
}
return ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700)
}
func (daemon *Daemon) setupSeccompProfile() error {
if daemon.configStore.SeccompProfile != "" {
daemon.seccompProfilePath = daemon.configStore.SeccompProfile
b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
if err != nil {
return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
}
daemon.seccompProfile = b
}
return nil
}
// RawSysInfo returns *sysinfo.SysInfo .
func (daemon *Daemon) RawSysInfo(quiet bool) *sysinfo.SysInfo {
var siOpts []sysinfo.Opt
if daemon.getCgroupDriver() == cgroupSystemdDriver {
if euid := os.Getenv("ROOTLESSKIT_PARENT_EUID"); euid != "" {
siOpts = append(siOpts, sysinfo.WithCgroup2GroupPath("/user.slice/user-"+euid+".slice"))
}
}
return sysinfo.New(quiet, siOpts...)
}
func recursiveUnmount(target string) error {
return mount.RecursiveUnmount(target)
}
| thaJeztah | 1ba54a5fd0ba293db3bea46cd67604b593f2048b | a060328874e00d54e734cacd756d9634e73d899c | FWIW; I initially wanted to add validation here, didn't see a clean way to return a warning from this function. Perhaps we need to make these functions return a `[]warnings` or `[]errors`, but I decided not to spend too much time on that; the `oci_linux` would now provide an error, so things won't go unnoticed at least.
Might still be good to look at improving these functions (in general: not this one specifically) to more easily allow warnings to be communicated to the user. | thaJeztah | 4,732 |
moby/moby | 42,460 | Update AUTHORS and mailmap | Updating the list to account for the new commits from libnetwork.
| null | 2021-06-04 00:19:58+00:00 | 2021-06-04 06:10:52+00:00 | .mailmap | # Generate AUTHORS: hack/generate-authors.sh
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
Aaron L. Xu <[email protected]>
Abhinandan Prativadi <[email protected]>
Adam Dobrawy <[email protected]>
Adam Dobrawy <[email protected]> <[email protected]>
Adrien Gallouët <[email protected]> <[email protected]>
Ahmed Kamal <[email protected]>
Ahmet Alp Balkan <[email protected]> <[email protected]>
AJ Bowen <[email protected]>
AJ Bowen <[email protected]> <[email protected]>
AJ Bowen <[email protected]> <[email protected]>
Akihiro Matsushima <[email protected]> <[email protected]>
Akihiro Suda <[email protected]>
Akihiro Suda <[email protected]> <[email protected]>
Akihiro Suda <[email protected]> <[email protected]>
Aleksa Sarai <[email protected]>
Aleksa Sarai <[email protected]> <[email protected]>
Aleksa Sarai <[email protected]> <[email protected]>
Aleksandrs Fadins <[email protected]>
Alessandro Boch <[email protected]> <[email protected]>
Alex Chen <[email protected]> <[email protected]>
Alex Ellis <[email protected]>
Alex Goodman <[email protected]> <[email protected]>
Alexander Larsson <[email protected]> <[email protected]>
Alexander Morozov <[email protected]>
Alexander Morozov <[email protected]> <[email protected]>
Alexandre Beslic <[email protected]> <[email protected]>
Alicia Lauerman <[email protected]> <[email protected]>
Allen Sun <[email protected]> <[email protected]>
Allen Sun <[email protected]> <[email protected]>
Andrea Denisse Gómez <[email protected]>
Andrew Weiss <[email protected]> <[email protected]>
Andrew Weiss <[email protected]> <[email protected]>
Andrey Kolomentsev <[email protected]>
Andrey Kolomentsev <[email protected]> <[email protected]>
André Martins <[email protected]> <[email protected]>
Andy Rothfusz <[email protected]> <[email protected]>
Andy Smith <[email protected]>
Ankush Agarwal <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Anuj Bahuguna <[email protected]>
Anuj Bahuguna <[email protected]> <[email protected]>
Anusha Ragunathan <[email protected]> <[email protected]>
Arko Dasgupta <[email protected]>
Arko Dasgupta <[email protected]> <[email protected]>
Arnaud Porterie <[email protected]>
Arnaud Porterie <[email protected]> <[email protected]>
Arthur Gautier <[email protected]> <[email protected]>
Avi Miller <[email protected]> <[email protected]>
Ben Bonnefoy <[email protected]>
Ben Golub <[email protected]>
Ben Toews <[email protected]> <[email protected]>
Benny Ng <[email protected]>
Benoit Chesneau <[email protected]>
Bevisy Zhang <[email protected]>
Bhiraj Butala <[email protected]>
Bhumika Bayani <[email protected]>
Bilal Amarni <[email protected]> <[email protected]>
Bill Wang <[email protected]> <[email protected]>
Bily Zhang <[email protected]>
Bin Liu <[email protected]>
Bin Liu <[email protected]> <[email protected]>
Bingshen Wang <[email protected]>
Boaz Shuster <[email protected]>
Boqin Qin <[email protected]>
Brandon Philips <[email protected]> <[email protected]>
Brandon Philips <[email protected]> <[email protected]>
Brent Salisbury <[email protected]> <[email protected]>
Brian Goff <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Carlos de Paula <[email protected]>
Chander Govindarajan <[email protected]>
Chao Wang <[email protected]> <[email protected]>
Charles Hooper <[email protected]> <[email protected]>
Chen Chao <[email protected]>
Chen Chuanliang <[email protected]>
Chen Mingjie <[email protected]>
Chen Qiu <[email protected]>
Chen Qiu <[email protected]> <[email protected]>
Chengfei Shang <[email protected]>
Chris Dias <[email protected]>
Chris McKinnel <[email protected]>
Chris Price <[email protected]>
Chris Price <[email protected]> <[email protected]>
Christopher Biscardi <[email protected]>
Christopher Latham <[email protected]>
Christy Norman <[email protected]>
Chun Chen <[email protected]> <[email protected]>
Corbin Coleman <[email protected]>
Cristian Ariza <[email protected]>
Cristian Staretu <[email protected]>
Cristian Staretu <[email protected]> <[email protected]>
Cristian Staretu <[email protected]> <[email protected]>
CUI Wei <[email protected]> cuiwei13 <[email protected]>
Daehyeok Mun <[email protected]>
Daehyeok Mun <[email protected]> <[email protected]>
Daehyeok Mun <[email protected]> <[email protected]>
Dan Feldman <[email protected]>
Daniel Dao <[email protected]>
Daniel Dao <[email protected]> <[email protected]>
Daniel Garcia <[email protected]>
Daniel Gasienica <[email protected]> <[email protected]>
Daniel Goosen <[email protected]> <[email protected]>
Daniel Grunwell <[email protected]>
Daniel Hiltgen <[email protected]> <[email protected]>
Daniel J Walsh <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Nephin <[email protected]> <[email protected]>
Daniel Norberg <[email protected]> <[email protected]>
Daniel Watkins <[email protected]>
Daniel Zhang <[email protected]>
Danny Yates <[email protected]> <[email protected]>
Darren Shepherd <[email protected]> <[email protected]>
Dattatraya Kumbhar <[email protected]>
Dave Goodchild <[email protected]>
Dave Henderson <[email protected]> <[email protected]>
Dave Tucker <[email protected]> <[email protected]>
David M. Karr <[email protected]>
David Sheets <[email protected]> <[email protected]>
David Sissitka <[email protected]>
David Williamson <[email protected]> <[email protected]>
Deshi Xiao <[email protected]> <[email protected]>
Deshi Xiao <[email protected]> <[email protected]>
Diego Siqueira <[email protected]>
Diogo Monica <[email protected]> <[email protected]>
Dmitry Sharshakov <[email protected]>
Dmitry Sharshakov <[email protected]> <[email protected]>
Dominic Yin <[email protected]>
Dominik Honnef <[email protected]> <[email protected]>
Doug Davis <[email protected]> <[email protected]>
Doug Tangren <[email protected]>
Drew Erny <[email protected]>
Drew Erny <[email protected]> <[email protected]>
Elan Ruusamäe <[email protected]>
Elan Ruusamäe <[email protected]> <[email protected]>
Elango Sivanandam <[email protected]>
Eric G. Noriega <[email protected]> <[email protected]>
Eric Hanchrow <[email protected]> <[email protected]>
Eric Rosenberg <[email protected]> <[email protected]>
Erica Windisch <[email protected]> <[email protected]>
Erica Windisch <[email protected]> <[email protected]>
Erik Hollensbe <[email protected]> <[email protected]>
Erwin van der Koogh <[email protected]>
Ethan Bell <[email protected]>
Euan Kemp <[email protected]> <[email protected]>
Eugen Krizo <[email protected]>
Evan Hazlett <[email protected]> <[email protected]>
Evelyn Xu <[email protected]>
Evgeny Shmarnev <[email protected]>
Faiz Khan <[email protected]>
Fangming Fang <[email protected]>
Felix Hupfeld <[email protected]> <[email protected]>
Felix Ruess <[email protected]> <[email protected]>
Feng Yan <[email protected]>
Fengtu Wang <[email protected]> <[email protected]>
Francisco Carriedo <[email protected]>
Frank Rosquin <[email protected]> <[email protected]>
Frederick F. Kautz IV <[email protected]> <[email protected]>
Fu JinLin <[email protected]>
Gabriel Nicolas Avellaneda <[email protected]>
Gaetan de Villele <[email protected]>
Gang Qiao <[email protected]> <[email protected]>
Geon Kim <[email protected]>
George Kontridze <[email protected]>
Gerwim Feiken <[email protected]> <[email protected]>
Giampaolo Mancini <[email protected]>
Giovan Isa Musthofa <[email protected]>
Gopikannan Venugopalsamy <[email protected]>
Gou Rao <[email protected]> <[email protected]>
Grant Millar <[email protected]>
Grant Millar <[email protected]> <[email protected]>
Grant Millar <[email protected]> <[email protected]>
Greg Stephens <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guri <[email protected]>
Gurjeet Singh <[email protected]> <[email protected]>
Gustav Sinder <[email protected]>
Günther Jungbluth <[email protected]>
Hakan Özler <[email protected]>
Hao Shu Wei <[email protected]>
Hao Shu Wei <[email protected]> <[email protected]>
Harald Albers <[email protected]> <[email protected]>
Harald Niesche <[email protected]>
Harold Cooper <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]>
Harshal Patil <[email protected]> <[email protected]>
Helen Xie <[email protected]>
Hiroyuki Sasagawa <[email protected]>
Hollie Teal <[email protected]>
Hollie Teal <[email protected]> <[email protected]>
Hollie Teal <[email protected]> <[email protected]>
Hu Keping <[email protected]>
Huu Nguyen <[email protected]> <[email protected]>
Hyzhou Zhy <[email protected]>
Hyzhou Zhy <[email protected]> <[email protected]>
Ian Campbell <[email protected]>
Ian Campbell <[email protected]> <[email protected]>
Ilya Khlopotov <[email protected]>
Iskander Sharipov <[email protected]>
Ivan Markin <[email protected]> <[email protected]>
Jack Laxson <[email protected]>
Jacob Atzen <[email protected]> <[email protected]>
Jacob Tomlinson <[email protected]> <[email protected]>
Jaivish Kothari <[email protected]>
James Nesbitt <[email protected]>
James Nesbitt <[email protected]> <[email protected]>
Jamie Hannaford <[email protected]> <[email protected]>
Jean Rouge <[email protected]> <[email protected]>
Jean-Baptiste Barth <[email protected]>
Jean-Baptiste Dalido <[email protected]>
Jean-Tiare Le Bigot <[email protected]> <[email protected]>
Jeff Anderson <[email protected]> <[email protected]>
Jeff Nickoloff <[email protected]> <[email protected]>
Jeroen Franse <[email protected]>
Jessica Frazelle <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jian Liao <[email protected]>
Jiang Jinyang <[email protected]>
Jiang Jinyang <[email protected]> <[email protected]>
Jim Galasyn <[email protected]>
Jiuyue Ma <[email protected]>
Joey Geiger <[email protected]>
Joffrey F <[email protected]>
Joffrey F <[email protected]> <[email protected]>
Joffrey F <[email protected]> <[email protected]>
Johan Euphrosine <[email protected]> <[email protected]>
John Harris <[email protected]>
John Howard <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Stephens <[email protected]> <[email protected]>
Jon Surrell <[email protected]> <[email protected]>
Jonathan Choy <[email protected]>
Jonathan Choy <[email protected]> <[email protected]>
Jordan Arentsen <[email protected]>
Jordan Jennings <[email protected]> <[email protected]>
Jorit Kleine-Möllhoff <[email protected]> <[email protected]>
Jose Diaz-Gonzalez <[email protected]>
Jose Diaz-Gonzalez <[email protected]> <[email protected]>
Jose Diaz-Gonzalez <[email protected]> <[email protected]>
Josh Bonczkowski <[email protected]>
Josh Eveleth <[email protected]> <[email protected]>
Josh Hawn <[email protected]> <[email protected]>
Josh Horwitz <[email protected]> <[email protected]>
Josh Soref <[email protected]> <[email protected]>
Josh Wilson <[email protected]> <[email protected]>
Joyce Jang <[email protected]>
Julien Bordellier <[email protected]> <[email protected]>
Julien Bordellier <[email protected]> <[email protected]>
Justin Cormack <[email protected]>
Justin Cormack <[email protected]> <[email protected]>
Justin Cormack <[email protected]> <[email protected]>
Justin Simonelis <[email protected]> <[email protected]>
Justin Terry <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
K. Heller <[email protected]> <[email protected]>
Kai Qiang Wu (Kennan) <[email protected]>
Kai Qiang Wu (Kennan) <[email protected]> <[email protected]>
Kamil Domański <[email protected]>
Kamjar Gerami <[email protected]>
Karthik Nayak <[email protected]>
Karthik Nayak <[email protected]> <[email protected]>
Ken Cochrane <[email protected]> <[email protected]>
Ken Herner <[email protected]> <[email protected]>
Ken Reese <[email protected]>
Kenfe-Mickaël Laventure <[email protected]>
Kevin Feyrer <[email protected]> <[email protected]>
Kevin Kern <[email protected]>
Kevin Meredith <[email protected]>
Kir Kolyshkin <[email protected]>
Kir Kolyshkin <[email protected]> <[email protected]>
Kir Kolyshkin <[email protected]> <[email protected]>
Konrad Kleine <[email protected]> <[email protected]>
Konstantin Gribov <[email protected]>
Konstantin Pelykh <[email protected]>
Kotaro Yoshimatsu <[email protected]>
Kunal Kushwaha <[email protected]> <[email protected]>
Lajos Papp <[email protected]> <[email protected]>
Lei Gong <[email protected]>
Lei Jitang <[email protected]>
Lei Jitang <[email protected]> <[email protected]>
Liang Mingqiang <[email protected]>
Liang-Chi Hsieh <[email protected]>
Liao Qingwei <[email protected]>
Linus Heckemann <[email protected]>
Linus Heckemann <[email protected]> <[email protected]>
Lokesh Mandvekar <[email protected]> <[email protected]>
Lorenzo Fontana <[email protected]> <[email protected]>
Lorenzo Fontana <[email protected]> <[email protected]>
Louis Opter <[email protected]>
Louis Opter <[email protected]> <[email protected]>
Luca Favatella <[email protected]> <[email protected]>
Luke Marsden <[email protected]> <[email protected]>
Lyn <[email protected]>
Lynda O'Leary <[email protected]>
Lynda O'Leary <[email protected]> <[email protected]>
Ma Müller <[email protected]>
Madhan Raj Mookkandy <[email protected]> <[email protected]>
Madhu Venugopal <[email protected]> <[email protected]>
Mageee <[email protected]> <21521230.zju.edu.cn>
Mansi Nahar <[email protected]> <[email protected]>
Mansi Nahar <[email protected]> <[email protected]>
Marc Abramowitz <[email protected]> <[email protected]>
Marcelo Horacio Fortino <[email protected]> <[email protected]>
Marcus Linke <[email protected]>
Marianna Tessel <[email protected]>
Mark Oates <[email protected]>
Markan Patel <[email protected]>
Markus Kortlang <[email protected]> <[email protected]>
Martin Redmond <[email protected]> <[email protected]>
Martin Redmond <[email protected]> <[email protected]>
Mary Anthony <[email protected]> <[email protected]>
Mary Anthony <[email protected]> <[email protected]>
Mary Anthony <[email protected]> moxiegirl <[email protected]>
Masato Ohba <[email protected]>
Matt Bentley <[email protected]> <[email protected]>
Matt Schurenko <[email protected]>
Matt Williams <[email protected]>
Matt Williams <[email protected]> <[email protected]>
Matthew Heon <[email protected]> <[email protected]>
Matthew Mosesohn <[email protected]>
Matthew Mueller <[email protected]>
Matthias Kühnle <[email protected]> <[email protected]>
Mauricio Garavaglia <[email protected]> <[email protected]>
Maxwell <[email protected]>
Maxwell <[email protected]> <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Hudson-Doyle <[email protected]> <[email protected]>
Michael Huettermann <[email protected]>
Michael Käufl <[email protected]> <[email protected]>
Michael Nussbaum <[email protected]>
Michael Nussbaum <[email protected]> <[email protected]>
Michael Spetsiotis <[email protected]>
Michal Minář <[email protected]>
Michał Gryko <[email protected]>
Michiel de Jong <[email protected]>
Mickaël Fortunato <[email protected]>
Miguel Angel Alvarez Cabrerizo <[email protected]> <[email protected]>
Miguel Angel Fernández <[email protected]>
Mihai Borobocea <[email protected]> <[email protected]>
Mike Casas <[email protected]> <[email protected]>
Mike Goelzer <[email protected]> <[email protected]>
Milind Chawre <[email protected]>
Misty Stanley-Jones <[email protected]> <[email protected]>
Mohit Soni <[email protected]> <[email protected]>
Moorthy RS <[email protected]> <[email protected]>
Moysés Borges <[email protected]>
Moysés Borges <[email protected]> <[email protected]>
Nace Oroz <[email protected]>
Natasha Jarus <[email protected]>
Nathan LeClaire <[email protected]> <[email protected]>
Nathan LeClaire <[email protected]> <[email protected]>
Neil Horman <[email protected]> <[email protected]>
Nick Russo <[email protected]> <[email protected]>
Nicolas Borboën <[email protected]> <[email protected]>
Nigel Poulton <[email protected]>
Nik Nyby <[email protected]> <[email protected]>
Nolan Darilek <[email protected]>
O.S. Tezer <[email protected]>
O.S. Tezer <[email protected]> <[email protected]>
Oh Jinkyun <[email protected]> <[email protected]>
Oliver Reason <[email protected]>
Olli Janatuinen <[email protected]>
Olli Janatuinen <[email protected]> <[email protected]>
Ouyang Liduo <[email protected]>
Patrick Stapleton <[email protected]>
Paul Liljenberg <[email protected]> <[email protected]>
Pavel Tikhomirov <[email protected]> <[email protected]>
Pawel Konczalski <[email protected]>
Peter Choi <[email protected]> <[email protected]>
Peter Dave Hello <[email protected]> <[email protected]>
Peter Jaffe <[email protected]>
Peter Nagy <[email protected]> <[email protected]>
Peter Waller <[email protected]> <[email protected]>
Phil Estes <[email protected]> <[email protected]>
Philip Alexander Etling <[email protected]>
Philipp Gillé <[email protected]> <[email protected]>
Prasanna Gautam <[email protected]>
Qiang Huang <[email protected]>
Qiang Huang <[email protected]> <[email protected]>
Ray Tsang <[email protected]> <[email protected]>
Renaud Gaubert <[email protected]> <[email protected]>
Robert Terhaar <[email protected]> <[email protected]>
Roberto G. Hashioka <[email protected]> <[email protected]>
Roberto Muñoz Fernández <[email protected]> <[email protected]>
Robin Thoni <[email protected]>
Roman Dudin <[email protected]> <[email protected]>
Rong Zhang <[email protected]>
Rongxiang Song <[email protected]>
Ross Boucher <[email protected]>
Rui Cao <[email protected]>
Runshen Zhu <[email protected]>
Ryan Stelly <[email protected]>
Sakeven Jiang <[email protected]>
Sandeep Bansal <[email protected]>
Sandeep Bansal <[email protected]> <[email protected]>
Sargun Dhillon <[email protected]> <[email protected]>
Sean Lee <[email protected]> <[email protected]>
Sebastiaan van Stijn <[email protected]> <[email protected]>
Sebastiaan van Stijn <[email protected]> <[email protected]>
Shaun Kaasten <[email protected]>
Shawn Landden <[email protected]> <[email protected]>
Shengbo Song <[email protected]>
Shengbo Song <[email protected]> <[email protected]>
Shih-Yuan Lee <[email protected]>
Shishir Mahajan <[email protected]> <[email protected]>
Shu-Wai Chow <[email protected]>
Shukui Yang <[email protected]>
Shuwei Hao <[email protected]>
Shuwei Hao <[email protected]> <[email protected]>
Sidhartha Mani <[email protected]>
Sjoerd Langkemper <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Soshi Katsuta <[email protected]>
Soshi Katsuta <[email protected]> <[email protected]>
Sridhar Ratnakumar <[email protected]>
Sridhar Ratnakumar <[email protected]> <[email protected]>
Srini Brahmaroutu <[email protected]> <[email protected]>
Srinivasan Srivatsan <[email protected]> <[email protected]>
Stefan Berger <[email protected]>
Stefan Berger <[email protected]> <[email protected]>
Stefan J. Wernli <[email protected]> <[email protected]>
Stefan S. <[email protected]>
Stefan Scherer <[email protected]>
Stefan Scherer <[email protected]> <[email protected]>
Stephan Spindler <[email protected]> <[email protected]>
Stephen Day <[email protected]>
Stephen Day <[email protected]> <[email protected]>
Stephen Day <[email protected]> <[email protected]>
Steve Desmond <[email protected]> <[email protected]>
Sun Gengze <[email protected]>
Sun Jianbo <[email protected]>
Sun Jianbo <[email protected]> <[email protected]>
Sven Dowideit <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <¨[email protected]¨>
Sylvain Bellemare <[email protected]>
Sylvain Bellemare <[email protected]> <[email protected]>
Tangi Colin <[email protected]>
Tejesh Mehta <[email protected]> <[email protected]>
Thatcher Peskens <[email protected]>
Thatcher Peskens <[email protected]> <[email protected]>
Thatcher Peskens <[email protected]> <[email protected]>
Thomas Gazagnaire <[email protected]> <[email protected]>
Thomas Léveil <[email protected]>
Thomas Léveil <[email protected]> <[email protected]>
Tibor Vass <[email protected]> <[email protected]>
Tibor Vass <[email protected]> <[email protected]>
Tim Bart <[email protected]>
Tim Bosse <[email protected]> <[email protected]>
Tim Ruffles <[email protected]> <[email protected]>
Tim Terhorst <[email protected]>
Tim Zju <[email protected]>
Timothy Hobbs <[email protected]>
Toli Kuznets <[email protected]>
Tom Barlow <[email protected]>
Tom Sweeney <[email protected]>
Tõnis Tiigi <[email protected]>
Trace Andreason <[email protected]>
Trishna Guha <[email protected]>
Tristan Carel <[email protected]>
Tristan Carel <[email protected]> <[email protected]>
Tyler Brown <[email protected]>
Umesh Yadav <[email protected]>
Umesh Yadav <[email protected]> <[email protected]>
Victor Lyuboslavsky <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Vikram bir Singh <[email protected]>
Vikram bir Singh <[email protected]> <[email protected]>
Viktor Vojnovski <[email protected]> <[email protected]>
Vincent Batts <[email protected]> <[email protected]>
Vincent Bernat <[email protected]> <[email protected]>
Vincent Bernat <[email protected]> <[email protected]>
Vincent Boulineau <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vishnu Kannan <[email protected]>
Vitaly Ostrosablin <[email protected]>
Vitaly Ostrosablin <[email protected]> <[email protected]>
Vladimir Rutsky <[email protected]> <[email protected]>
Walter Stanish <[email protected]>
Wang Chao <[email protected]>
Wang Chao <[email protected]> <[email protected]>
Wang Guoliang <[email protected]>
Wang Jie <[email protected]>
Wang Ping <[email protected]>
Wang Xing <[email protected]> <root@localhost>
Wang Yuexiao <[email protected]>
Wayne Chang <[email protected]>
Wayne Song <[email protected]> <[email protected]>
Wei Wu <[email protected]> cizixs <[email protected]>
Wenjun Tang <[email protected]> <[email protected]>
Wewang Xiaorenfine <[email protected]>
Will Weaver <[email protected]>
Wing-Kam Wong <[email protected]>
Xian Chaobo <[email protected]>
Xian Chaobo <[email protected]> <[email protected]>
Xianglin Gao <[email protected]>
Xianlu Bird <[email protected]>
Xiao YongBiao <[email protected]>
Xiaodong Liu <[email protected]>
Xiaodong Zhang <[email protected]>
Xiaoyu Zhang <[email protected]>
Xuecong Liao <[email protected]>
Yamasaki Masahide <[email protected]>
Yao Zaiyong <[email protected]>
Yassine Tijani <[email protected]>
Yazhong Liu <[email protected]>
Yestin Sun <[email protected]> <[email protected]>
Yi EungJun <[email protected]> <[email protected]>
Ying Li <[email protected]>
Ying Li <[email protected]> <[email protected]>
Yong Tang <[email protected]> <[email protected]>
Yongxin Li <[email protected]>
Yosef Fertel <[email protected]> <[email protected]>
Yu Changchun <[email protected]>
Yu Chengxia <[email protected]>
Yu Peng <[email protected]>
Yu Peng <[email protected]> <[email protected]>
Yue Zhang <[email protected]>
Zachary Jaffee <[email protected]> <[email protected]>
Zachary Jaffee <[email protected]> <[email protected]>
ZhangHang <[email protected]>
Zhenkun Bi <[email protected]>
Zhou Hao <[email protected]>
Zhoulin Xie <[email protected]>
Zhu Kunjia <[email protected]>
Ziheng Liu <[email protected]>
Zou Yu <[email protected]>
Zuhayr Elahi <[email protected]>
Zuhayr Elahi <[email protected]> <[email protected]>
| # Generate AUTHORS: hack/generate-authors.sh
# Tip for finding duplicates (besides scanning the output of AUTHORS for name
# duplicates that aren't also email duplicates): scan the output of:
# git log --format='%aE - %aN' | sort -uf
#
# For explanation on this file format: man git-shortlog
<[email protected]> <[email protected]>
<[email protected]> <[email protected]>
Aaron L. Xu <[email protected]>
Abhinandan Prativadi <[email protected]>
Abhinandan Prativadi <[email protected]> <[email protected]>
Abhinandan Prativadi <[email protected]> abhi <user.email>
Abhishek Chanda <[email protected]>
Abhishek Chanda <[email protected]> <[email protected]>
Ada Mancini <[email protected]>
Adam Dobrawy <[email protected]>
Adam Dobrawy <[email protected]> <[email protected]>
Adrien Gallouët <[email protected]> <[email protected]>
Ahmed Kamal <[email protected]>
Ahmet Alp Balkan <[email protected]> <[email protected]>
AJ Bowen <[email protected]>
AJ Bowen <[email protected]> <[email protected]>
AJ Bowen <[email protected]> <[email protected]>
Akihiro Matsushima <[email protected]> <[email protected]>
Akihiro Suda <[email protected]>
Akihiro Suda <[email protected]> <[email protected]>
Akihiro Suda <[email protected]> <[email protected]>
Akshay Moghe <[email protected]>
Aleksa Sarai <[email protected]>
Aleksa Sarai <[email protected]> <[email protected]>
Aleksa Sarai <[email protected]> <[email protected]>
Aleksandrs Fadins <[email protected]>
Alessandro Boch <[email protected]>
Alessandro Boch <[email protected]>
Alessandro Boch <[email protected]> <[email protected]>
Alessandro Boch <[email protected]> <[email protected]>
Alessandro Boch <[email protected]> <[email protected]>
Alex Chan <[email protected]>
Alex Chan <[email protected]> <[email protected]>
Alex Chen <[email protected]> <[email protected]>
Alex Ellis <[email protected]>
Alex Goodman <[email protected]> <[email protected]>
Alexander Larsson <[email protected]> <[email protected]>
Alexander Morozov <[email protected]>
Alexander Morozov <[email protected]> <[email protected]>
Alexandre Beslic <[email protected]> <[email protected]>
Alexis Thomas <[email protected]>
Alicia Lauerman <[email protected]> <[email protected]>
Allen Sun <[email protected]> <[email protected]>
Allen Sun <[email protected]> <[email protected]>
Andrea Denisse Gómez <[email protected]>
Andrew Kim <[email protected]>
Andrew Kim <[email protected]> <[email protected]>
Andrew Weiss <[email protected]> <[email protected]>
Andrew Weiss <[email protected]> <[email protected]>
Andrey Kolomentsev <[email protected]>
Andrey Kolomentsev <[email protected]> <[email protected]>
André Martins <[email protected]> <[email protected]>
Andy Rothfusz <[email protected]> <[email protected]>
Andy Smith <[email protected]>
Ankush Agarwal <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Antonio Murdaca <[email protected]> <[email protected]>
Anuj Bahuguna <[email protected]>
Anuj Bahuguna <[email protected]> <[email protected]>
Anusha Ragunathan <[email protected]> <[email protected]>
Arko Dasgupta <[email protected]>
Arko Dasgupta <[email protected]> <[email protected]>
Arnaud Porterie <[email protected]>
Arnaud Porterie <[email protected]> <[email protected]>
Arnaud Rebillout <[email protected]>
Arnaud Rebillout <[email protected]> <[email protected]>
Arthur Gautier <[email protected]> <[email protected]>
Avi Miller <[email protected]> <[email protected]>
Ben Bonnefoy <[email protected]>
Ben Golub <[email protected]>
Ben Toews <[email protected]> <[email protected]>
Benny Ng <[email protected]>
Benoit Chesneau <[email protected]>
Bevisy Zhang <[email protected]>
Bhiraj Butala <[email protected]>
Bhumika Bayani <[email protected]>
Bilal Amarni <[email protected]> <[email protected]>
Bill Wang <[email protected]> <[email protected]>
Bily Zhang <[email protected]>
Bin Liu <[email protected]>
Bin Liu <[email protected]> <[email protected]>
Bingshen Wang <[email protected]>
Boaz Shuster <[email protected]>
Boqin Qin <[email protected]>
Brandon Philips <[email protected]> <[email protected]>
Brandon Philips <[email protected]> <[email protected]>
Brent Salisbury <[email protected]> <[email protected]>
Brian Goff <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Brian Goff <[email protected]> <[email protected]>
Cameron Sparr <[email protected]>
Carlos de Paula <[email protected]>
Chander Govindarajan <[email protected]>
Chao Wang <[email protected]> <[email protected]>
Charles Hooper <[email protected]> <[email protected]>
Chen Chao <[email protected]>
Chen Chuanliang <[email protected]>
Chen Mingjie <[email protected]>
Chen Qiu <[email protected]>
Chen Qiu <[email protected]> <[email protected]>
Chengfei Shang <[email protected]>
Chris Dias <[email protected]>
Chris McKinnel <[email protected]>
Chris Price <[email protected]>
Chris Price <[email protected]> <[email protected]>
Chris Telfer <[email protected]>
Chris Telfer <[email protected]> <[email protected]>
Christopher Biscardi <[email protected]>
Christopher Latham <[email protected]>
Christy Norman <[email protected]>
Chun Chen <[email protected]> <[email protected]>
Corbin Coleman <[email protected]>
Cristian Ariza <[email protected]>
Cristian Staretu <[email protected]>
Cristian Staretu <[email protected]> <[email protected]>
Cristian Staretu <[email protected]> <[email protected]>
CUI Wei <[email protected]> cuiwei13 <[email protected]>
Daehyeok Mun <[email protected]>
Daehyeok Mun <[email protected]> <[email protected]>
Daehyeok Mun <[email protected]> <[email protected]>
Dan Feldman <[email protected]>
Daniel Dao <[email protected]>
Daniel Dao <[email protected]> <[email protected]>
Daniel Garcia <[email protected]>
Daniel Gasienica <[email protected]> <[email protected]>
Daniel Goosen <[email protected]> <[email protected]>
Daniel Grunwell <[email protected]>
Daniel Hiltgen <[email protected]> <[email protected]>
Daniel J Walsh <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Mizyrycki <[email protected]> <[email protected]>
Daniel Nephin <[email protected]> <[email protected]>
Daniel Norberg <[email protected]> <[email protected]>
Daniel Watkins <[email protected]>
Daniel Zhang <[email protected]>
Danny Yates <[email protected]> <[email protected]>
Darren Shepherd <[email protected]> <[email protected]>
Dattatraya Kumbhar <[email protected]>
Dave Goodchild <[email protected]>
Dave Henderson <[email protected]> <[email protected]>
Dave Tucker <[email protected]> <[email protected]>
David M. Karr <[email protected]>
David Sheets <[email protected]> <[email protected]>
David Sissitka <[email protected]>
David Williamson <[email protected]> <[email protected]>
Derek Ch <[email protected]>
Deshi Xiao <[email protected]> <[email protected]>
Deshi Xiao <[email protected]> <[email protected]>
Dhilip Kumars <[email protected]>
Diego Siqueira <[email protected]>
Diogo Monica <[email protected]> <[email protected]>
Dmitry Sharshakov <[email protected]>
Dmitry Sharshakov <[email protected]> <[email protected]>
Dmytro Iakovliev <[email protected]>
Dominic Yin <[email protected]>
Dominik Honnef <[email protected]> <[email protected]>
Doug Davis <[email protected]> <[email protected]>
Doug Tangren <[email protected]>
Drew Erny <[email protected]>
Drew Erny <[email protected]> <[email protected]>
Elan Ruusamäe <[email protected]>
Elan Ruusamäe <[email protected]> <[email protected]>
Elango Sivanandam <[email protected]>
Elango Sivanandam <[email protected]> <[email protected]>
Eli Uriegas <[email protected]>
Eli Uriegas <[email protected]> <[email protected]>
Eric G. Noriega <[email protected]> <[email protected]>
Eric Hanchrow <[email protected]> <[email protected]>
Eric Rosenberg <[email protected]> <[email protected]>
Erica Windisch <[email protected]> <[email protected]>
Erica Windisch <[email protected]> <[email protected]>
Erik Hollensbe <[email protected]> <[email protected]>
Erwin van der Koogh <[email protected]>
Ethan Bell <[email protected]>
Euan Kemp <[email protected]> <[email protected]>
Eugen Krizo <[email protected]>
Evan Hazlett <[email protected]> <[email protected]>
Evelyn Xu <[email protected]>
Evgeny Shmarnev <[email protected]>
Faiz Khan <[email protected]>
Fangming Fang <[email protected]>
Felix Hupfeld <[email protected]> <[email protected]>
Felix Ruess <[email protected]> <[email protected]>
Feng Yan <[email protected]>
Fengtu Wang <[email protected]> <[email protected]>
Francisco Carriedo <[email protected]>
Frank Rosquin <[email protected]> <[email protected]>
Frank Yang <[email protected]>
Frederick F. Kautz IV <[email protected]> <[email protected]>
Fu JinLin <[email protected]>
Gabriel Nicolas Avellaneda <[email protected]>
Gaetan de Villele <[email protected]>
Gang Qiao <[email protected]> <[email protected]>
Geon Kim <[email protected]>
George Kontridze <[email protected]>
Gerwim Feiken <[email protected]> <[email protected]>
Giampaolo Mancini <[email protected]>
Giovan Isa Musthofa <[email protected]>
Gopikannan Venugopalsamy <[email protected]>
Gou Rao <[email protected]> <[email protected]>
Grant Millar <[email protected]>
Grant Millar <[email protected]> <[email protected]>
Grant Millar <[email protected]> <[email protected]>
Greg Stephens <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Guillaume J. Charmes <[email protected]> <[email protected]>
Gunadhya S. <[email protected]>
Guri <[email protected]>
Gurjeet Singh <[email protected]> <[email protected]>
Gustav Sinder <[email protected]>
Günther Jungbluth <[email protected]>
Hakan Özler <[email protected]>
Hao Shu Wei <[email protected]>
Hao Shu Wei <[email protected]> <[email protected]>
Harald Albers <[email protected]> <[email protected]>
Harald Niesche <[email protected]>
Harold Cooper <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]> <[email protected]>
Harry Zhang <[email protected]>
Harshal Patil <[email protected]> <[email protected]>
Helen Xie <[email protected]>
Hiroyuki Sasagawa <[email protected]>
Hollie Teal <[email protected]>
Hollie Teal <[email protected]> <[email protected]>
Hollie Teal <[email protected]> <[email protected]>
Hu Keping <[email protected]>
Hui Kang <[email protected]>
Hui Kang <[email protected]> <[email protected]>
Huu Nguyen <[email protected]> <[email protected]>
Hyzhou Zhy <[email protected]>
Hyzhou Zhy <[email protected]> <[email protected]>
Ian Campbell <[email protected]>
Ian Campbell <[email protected]> <[email protected]>
Ilya Khlopotov <[email protected]>
Iskander Sharipov <[email protected]>
Ivan Markin <[email protected]> <[email protected]>
Jack Laxson <[email protected]>
Jacob Atzen <[email protected]> <[email protected]>
Jacob Tomlinson <[email protected]> <[email protected]>
Jaivish Kothari <[email protected]>
Jake Moshenko <[email protected]>
Jakub Drahos <[email protected]>
Jakub Drahos <[email protected]> <[email protected]>
James Nesbitt <[email protected]>
James Nesbitt <[email protected]> <[email protected]>
Jamie Hannaford <[email protected]> <[email protected]>
Jana Radhakrishnan <[email protected]>
Jana Radhakrishnan <[email protected]> <[email protected]>
Jean Rouge <[email protected]> <[email protected]>
Jean-Baptiste Barth <[email protected]>
Jean-Baptiste Dalido <[email protected]>
Jean-Tiare Le Bigot <[email protected]> <[email protected]>
Jeff Anderson <[email protected]> <[email protected]>
Jeff Nickoloff <[email protected]> <[email protected]>
Jeroen Franse <[email protected]>
Jessica Frazelle <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jessica Frazelle <[email protected]> <[email protected]>
Jian Liao <[email protected]>
Jiang Jinyang <[email protected]>
Jiang Jinyang <[email protected]> <[email protected]>
Jim Galasyn <[email protected]>
Jiuyue Ma <[email protected]>
Joey Geiger <[email protected]>
Joffrey F <[email protected]>
Joffrey F <[email protected]> <[email protected]>
Joffrey F <[email protected]> <[email protected]>
Johan Euphrosine <[email protected]> <[email protected]>
John Harris <[email protected]>
John Howard <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Howard <[email protected]> <[email protected]>
John Stephens <[email protected]> <[email protected]>
Jon Surrell <[email protected]> <[email protected]>
Jonathan Choy <[email protected]>
Jonathan Choy <[email protected]> <[email protected]>
Jordan Arentsen <[email protected]>
Jordan Jennings <[email protected]> <[email protected]>
Jorit Kleine-Möllhoff <[email protected]> <[email protected]>
Jose Diaz-Gonzalez <[email protected]>
Jose Diaz-Gonzalez <[email protected]> <[email protected]>
Jose Diaz-Gonzalez <[email protected]> <[email protected]>
Josh Bonczkowski <[email protected]>
Josh Eveleth <[email protected]> <[email protected]>
Josh Hawn <[email protected]> <[email protected]>
Josh Horwitz <[email protected]> <[email protected]>
Josh Soref <[email protected]> <[email protected]>
Josh Wilson <[email protected]> <[email protected]>
Joyce Jang <[email protected]>
Julien Bordellier <[email protected]> <[email protected]>
Julien Bordellier <[email protected]> <[email protected]>
Jun Du <[email protected]>
Justin Cormack <[email protected]>
Justin Cormack <[email protected]> <[email protected]>
Justin Cormack <[email protected]> <[email protected]>
Justin Simonelis <[email protected]> <[email protected]>
Justin Terry <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
Jérôme Petazzoni <[email protected]> <[email protected]>
K. Heller <[email protected]> <[email protected]>
Kai Qiang Wu (Kennan) <[email protected]>
Kai Qiang Wu (Kennan) <[email protected]> <[email protected]>
Kamil Domański <[email protected]>
Kamjar Gerami <[email protected]>
Karthik Nayak <[email protected]>
Karthik Nayak <[email protected]> <[email protected]>
Ken Cochrane <[email protected]> <[email protected]>
Ken Herner <[email protected]> <[email protected]>
Ken Reese <[email protected]>
Kenfe-Mickaël Laventure <[email protected]>
Kevin Feyrer <[email protected]> <[email protected]>
Kevin Kern <[email protected]>
Kevin Meredith <[email protected]>
Kir Kolyshkin <[email protected]>
Kir Kolyshkin <[email protected]> <[email protected]>
Kir Kolyshkin <[email protected]> <[email protected]>
Konrad Kleine <[email protected]> <[email protected]>
Konstantin Gribov <[email protected]>
Konstantin Pelykh <[email protected]>
Kotaro Yoshimatsu <[email protected]>
Kunal Kushwaha <[email protected]>
Kunal Kushwaha <[email protected]> <[email protected]>
Kunal Kushwaha <[email protected]> <[email protected]>
Kyle Squizzato <[email protected]>
Kyle Squizzato <[email protected]> <[email protected]>
Lajos Papp <[email protected]> <[email protected]>
Lei Gong <[email protected]>
Lei Jitang <[email protected]>
Lei Jitang <[email protected]> <[email protected]>
Lei Jitang <[email protected]> <[email protected]>
Leiiwang <[email protected]>
Liang Mingqiang <[email protected]>
Liang-Chi Hsieh <[email protected]>
Liao Qingwei <[email protected]>
Linus Heckemann <[email protected]>
Linus Heckemann <[email protected]> <[email protected]>
Lokesh Mandvekar <[email protected]> <[email protected]>
Lorenzo Fontana <[email protected]> <[email protected]>
Lorenzo Fontana <[email protected]> <[email protected]>
Louis Opter <[email protected]>
Louis Opter <[email protected]> <[email protected]>
Luca Favatella <[email protected]> <[email protected]>
Luke Marsden <[email protected]> <[email protected]>
Lyn <[email protected]>
Lynda O'Leary <[email protected]>
Lynda O'Leary <[email protected]> <[email protected]>
Ma Müller <[email protected]>
Madhan Raj Mookkandy <[email protected]>
Madhan Raj Mookkandy <[email protected]> <[email protected]>
Madhan Raj Mookkandy <[email protected]> <[email protected]>
Madhu Venugopal <[email protected]> <[email protected]>
Madhu Venugopal <[email protected]> <[email protected]>
Mageee <[email protected]> <21521230.zju.edu.cn>
Mansi Nahar <[email protected]> <[email protected]>
Mansi Nahar <[email protected]> <[email protected]>
Marc Abramowitz <[email protected]> <[email protected]>
Marcelo Horacio Fortino <[email protected]> <[email protected]>
Marcus Linke <[email protected]>
Marianna Tessel <[email protected]>
Mark Oates <[email protected]>
Markan Patel <[email protected]>
Markus Kortlang <[email protected]> <[email protected]>
Martin Redmond <[email protected]> <[email protected]>
Martin Redmond <[email protected]> <[email protected]>
Maru Newby <[email protected]>
Mary Anthony <[email protected]> <[email protected]>
Mary Anthony <[email protected]> <[email protected]>
Mary Anthony <[email protected]> moxiegirl <[email protected]>
Masato Ohba <[email protected]>
Matt Bentley <[email protected]> <[email protected]>
Matt Schurenko <[email protected]>
Matt Williams <[email protected]>
Matt Williams <[email protected]> <[email protected]>
Matthew Heon <[email protected]> <[email protected]>
Matthew Mosesohn <[email protected]>
Matthew Mueller <[email protected]>
Matthias Kühnle <[email protected]> <[email protected]>
Mauricio Garavaglia <[email protected]> <[email protected]>
Maxwell <[email protected]>
Maxwell <[email protected]> <[email protected]>
Menghui Chen <[email protected]>
Michael Beskin <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Crosby <[email protected]> <[email protected]>
Michael Hudson-Doyle <[email protected]> <[email protected]>
Michael Huettermann <[email protected]>
Michael Käufl <[email protected]> <[email protected]>
Michael Nussbaum <[email protected]>
Michael Nussbaum <[email protected]> <[email protected]>
Michael Spetsiotis <[email protected]>
Michael Stapelberg <[email protected]>
Michael Stapelberg <[email protected]> <[email protected]>
Michal Kostrzewa <[email protected]>
Michal Kostrzewa <[email protected]> <[email protected]>
Michal Minář <[email protected]>
Michał Gryko <[email protected]>
Michiel de Jong <[email protected]>
Mickaël Fortunato <[email protected]>
Miguel Angel Alvarez Cabrerizo <[email protected]> <[email protected]>
Miguel Angel Fernández <[email protected]>
Mihai Borobocea <[email protected]> <[email protected]>
Mikael Davranche <[email protected]>
Mikael Davranche <[email protected]> <[email protected]>
Mike Casas <[email protected]> <[email protected]>
Mike Goelzer <[email protected]> <[email protected]>
Milind Chawre <[email protected]>
Misty Stanley-Jones <[email protected]> <[email protected]>
Mohammad Banikazemi <[email protected]>
Mohammad Banikazemi <[email protected]> <[email protected]>
Mohit Soni <[email protected]> <[email protected]>
Moorthy RS <[email protected]> <[email protected]>
Moysés Borges <[email protected]>
Moysés Borges <[email protected]> <[email protected]>
Nace Oroz <[email protected]>
Natasha Jarus <[email protected]>
Nathan LeClaire <[email protected]> <[email protected]>
Nathan LeClaire <[email protected]> <[email protected]>
Neil Horman <[email protected]> <[email protected]>
Nick Russo <[email protected]> <[email protected]>
Nicolas Borboën <[email protected]> <[email protected]>
Nigel Poulton <[email protected]>
Nik Nyby <[email protected]> <[email protected]>
Nolan Darilek <[email protected]>
O.S. Tezer <[email protected]>
O.S. Tezer <[email protected]> <[email protected]>
Oh Jinkyun <[email protected]> <[email protected]>
Oliver Reason <[email protected]>
Olli Janatuinen <[email protected]>
Olli Janatuinen <[email protected]> <[email protected]>
Onur Filiz <[email protected]>
Onur Filiz <[email protected]> <[email protected]>
Ouyang Liduo <[email protected]>
Patrick Stapleton <[email protected]>
Paul Liljenberg <[email protected]> <[email protected]>
Pavel Tikhomirov <[email protected]> <[email protected]>
Pawel Konczalski <[email protected]>
Peter Choi <[email protected]> <[email protected]>
Peter Dave Hello <[email protected]> <[email protected]>
Peter Jaffe <[email protected]>
Peter Nagy <[email protected]> <[email protected]>
Peter Waller <[email protected]> <[email protected]>
Phil Estes <[email protected]> <[email protected]>
Philip Alexander Etling <[email protected]>
Philipp Gillé <[email protected]> <[email protected]>
Prasanna Gautam <[email protected]>
Puneet Pruthi <[email protected]>
Puneet Pruthi <[email protected]> <[email protected]>
Qiang Huang <[email protected]>
Qiang Huang <[email protected]> <[email protected]>
Qin TianHuan <[email protected]>
Ray Tsang <[email protected]> <[email protected]>
Renaud Gaubert <[email protected]> <[email protected]>
Robert Terhaar <[email protected]> <[email protected]>
Roberto G. Hashioka <[email protected]> <[email protected]>
Roberto Muñoz Fernández <[email protected]> <[email protected]>
Robin Thoni <[email protected]>
Roman Dudin <[email protected]> <[email protected]>
Rong Zhang <[email protected]>
Rongxiang Song <[email protected]>
Rony Weng <[email protected]>
Ross Boucher <[email protected]>
Rui Cao <[email protected]>
Runshen Zhu <[email protected]>
Ryan Stelly <[email protected]>
Ryoga Saito <[email protected]>
Ryoga Saito <[email protected]> <[email protected]>
Sainath Grandhi <[email protected]>
Sainath Grandhi <[email protected]> <[email protected]>
Sakeven Jiang <[email protected]>
Sandeep Bansal <[email protected]>
Sandeep Bansal <[email protected]> <[email protected]>
Santhosh Manohar <[email protected]>
Sargun Dhillon <[email protected]> <[email protected]>
Sean Lee <[email protected]> <[email protected]>
Sebastiaan van Stijn <[email protected]>
Sebastiaan van Stijn <[email protected]> <[email protected]>
Sebastiaan van Stijn <[email protected]> <[email protected]>
Sebastiaan van Stijn <[email protected]> <[email protected]>
Shaun Kaasten <[email protected]>
Shawn Landden <[email protected]> <[email protected]>
Shengbo Song <[email protected]>
Shengbo Song <[email protected]> <[email protected]>
Shih-Yuan Lee <[email protected]>
Shishir Mahajan <[email protected]> <[email protected]>
Shu-Wai Chow <[email protected]>
Shukui Yang <[email protected]>
Shuwei Hao <[email protected]>
Shuwei Hao <[email protected]> <[email protected]>
Sidhartha Mani <[email protected]>
Sjoerd Langkemper <[email protected]> <[email protected]>
Smark Meng <[email protected]>
Smark Meng <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Solomon Hykes <[email protected]> <[email protected]>
Soshi Katsuta <[email protected]>
Soshi Katsuta <[email protected]> <[email protected]>
Sridhar Ratnakumar <[email protected]>
Sridhar Ratnakumar <[email protected]> <[email protected]>
Srini Brahmaroutu <[email protected]> <[email protected]>
Srinivasan Srivatsan <[email protected]> <[email protected]>
Stefan Berger <[email protected]>
Stefan Berger <[email protected]> <[email protected]>
Stefan J. Wernli <[email protected]> <[email protected]>
Stefan S. <[email protected]>
Stefan Scherer <[email protected]>
Stefan Scherer <[email protected]> <[email protected]>
Stephan Spindler <[email protected]> <[email protected]>
Stephen Day <[email protected]>
Stephen Day <[email protected]> <[email protected]>
Stephen Day <[email protected]> <[email protected]>
Steve Desmond <[email protected]> <[email protected]>
Sun Gengze <[email protected]>
Sun Jianbo <[email protected]>
Sun Jianbo <[email protected]> <[email protected]>
Sven Dowideit <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <[email protected]>
Sven Dowideit <[email protected]> <¨[email protected]¨>
Sylvain Bellemare <[email protected]>
Sylvain Bellemare <[email protected]> <[email protected]>
Tangi Colin <[email protected]>
Tejesh Mehta <[email protected]> <[email protected]>
Terry Chu <[email protected]>
Terry Chu <[email protected]> <[email protected]>
Thatcher Peskens <[email protected]>
Thatcher Peskens <[email protected]> <[email protected]>
Thatcher Peskens <[email protected]> <[email protected]>
Thiago Alves Silva <[email protected]>
Thiago Alves Silva <[email protected]> <[email protected]>
Thomas Gazagnaire <[email protected]> <[email protected]>
Thomas Léveil <[email protected]>
Thomas Léveil <[email protected]> <[email protected]>
Tibor Vass <[email protected]> <[email protected]>
Tibor Vass <[email protected]> <[email protected]>
Till Claassen <[email protected]>
Tim Bart <[email protected]>
Tim Bosse <[email protected]> <[email protected]>
Tim Potter <[email protected]>
Tim Potter <[email protected]> <[email protected]>
Tim Ruffles <[email protected]> <[email protected]>
Tim Terhorst <[email protected]>
Tim Wagner <[email protected]>
Tim Wagner <[email protected]> <[email protected]>
Tim Zju <[email protected]>
Timothy Hobbs <[email protected]>
Toli Kuznets <[email protected]>
Tom Barlow <[email protected]>
Tom Denham <[email protected]>
Tom Denham <[email protected]> <[email protected]>
Tom Sweeney <[email protected]>
Tom Wilkie <[email protected]>
Tom Wilkie <[email protected]> <[email protected]>
Tõnis Tiigi <[email protected]>
Trace Andreason <[email protected]>
Trapier Marshall <[email protected]>
Trapier Marshall <[email protected]> <[email protected]>
Trishna Guha <[email protected]>
Tristan Carel <[email protected]>
Tristan Carel <[email protected]> <[email protected]>
Tyler Brown <[email protected]>
Umesh Yadav <[email protected]>
Umesh Yadav <[email protected]> <[email protected]>
Victor Lyuboslavsky <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Victor Vieux <[email protected]> <[email protected]>
Vikas Choudhary <[email protected]>
Vikram bir Singh <[email protected]>
Vikram bir Singh <[email protected]> <[email protected]>
Viktor Vojnovski <[email protected]> <[email protected]>
Vincent Batts <[email protected]> <[email protected]>
Vincent Bernat <[email protected]> <[email protected]>
Vincent Bernat <[email protected]> <[email protected]>
Vincent Boulineau <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vincent Demeester <[email protected]> <[email protected]>
Vishnu Kannan <[email protected]>
Vitaly Ostrosablin <[email protected]>
Vitaly Ostrosablin <[email protected]> <[email protected]>
Vladimir Rutsky <[email protected]> <[email protected]>
Vladislav Kolesnikov <[email protected]>
Vladislav Kolesnikov <[email protected]> <[email protected]>
Walter Stanish <[email protected]>
Wang Chao <[email protected]>
Wang Chao <[email protected]> <[email protected]>
Wang Guoliang <[email protected]>
Wang Jie <[email protected]>
Wang Ping <[email protected]>
Wang Xing <[email protected]> <root@localhost>
Wang Yuexiao <[email protected]>
Wayne Chang <[email protected]>
Wayne Song <[email protected]> <[email protected]>
Wei Wu <[email protected]> cizixs <[email protected]>
Wen Cheng Ma <[email protected]>
Wenjun Tang <[email protected]> <[email protected]>
Wewang Xiaorenfine <[email protected]>
Will Weaver <[email protected]>
Wing-Kam Wong <[email protected]>
WuLonghui <[email protected]>
Xian Chaobo <[email protected]>
Xian Chaobo <[email protected]> <[email protected]>
Xianglin Gao <[email protected]>
Xianjie <[email protected]>
Xianjie <[email protected]> <[email protected]>
Xianlu Bird <[email protected]>
Xiao YongBiao <[email protected]>
Xiao Zhang <[email protected]>
Xiaodong Liu <[email protected]>
Xiaodong Zhang <[email protected]>
Xiaohua Ding <[email protected]>
Xiaoyu Zhang <[email protected]>
Xuecong Liao <[email protected]>
Yamasaki Masahide <[email protected]>
Yao Zaiyong <[email protected]>
Yassine Tijani <[email protected]>
Yazhong Liu <[email protected]>
Yestin Sun <[email protected]> <[email protected]>
Yi EungJun <[email protected]> <[email protected]>
Ying Li <[email protected]>
Ying Li <[email protected]> <[email protected]>
Yong Tang <[email protected]> <[email protected]>
Yongxin Li <[email protected]>
Yosef Fertel <[email protected]> <[email protected]>
Yu Changchun <[email protected]>
Yu Chengxia <[email protected]>
Yu Peng <[email protected]>
Yu Peng <[email protected]> <[email protected]>
Yue Zhang <[email protected]>
Zach Gershman <[email protected]>
Zach Gershman <[email protected]> <[email protected]>
Zachary Jaffee <[email protected]> <[email protected]>
Zachary Jaffee <[email protected]> <[email protected]>
ZhangHang <[email protected]>
Zhenkun Bi <[email protected]>
Zhou Hao <[email protected]>
Zhoulin Xie <[email protected]>
Zhu Kunjia <[email protected]>
Ziheng Liu <[email protected]>
Zou Yu <[email protected]>
Zuhayr Elahi <[email protected]>
Zuhayr Elahi <[email protected]> <[email protected]>
정재영 <[email protected]>
정재영 <[email protected]> <[email protected]>
| thaJeztah | 33c332ad1982019cb9250ed0163ee2ed4955f596 | e0cabedf14d0eac1e9a41036c7df9745cc0adfa1 | Heh. Looks like I reconfigured my git at some point, when I was working on contributing docs 😂 | thaJeztah | 4,733 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was hitting this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | I'm slightly confused here; from that ticket, it looks like the files _inside_ `/etc/ssl` are symlinks, but `/etc/ssl` itself is not, correct?
```bash
$ docker run --rm opensuse/tumbleweed sh -c 'ls -l /etc | grep ssl && ls -l /etc/ssl'
drwxr-xr-x 2 root root 4096 Jun 4 13:11 ssl
total 0
lrwxrwxrwx 1 root root 43 May 8 20:10 ca-bundle.pem -> ../../var/lib/ca-certificates/ca-bundle.pem
lrwxrwxrwx 1 root root 33 May 8 20:10 certs -> ../../var/lib/ca-certificates/pem
$ docker run --rm opensuse/tumbleweed realpath /etc/ssl
/etc/ssl
```
Isn't the issue that the files _inside_ it cannot be used (because they're symlinked?) | thaJeztah | 4,734 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was facing this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | In other words, would we:
- find `/var/lib/ca-certificates/` (based on one of the symlinks inside the directory)
- then `mount --rbind /var/lib/ca-certificates/ /etc/ssl` ?
Currently it looks like it's just mounting `/etc/ssl` on `/etc/ssl`
(perhaps I'm wrong!) | thaJeztah | 4,735 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was facing this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | > it looks like the files inside /etc/ssl are symlinks, but /etc/ssl itself is not, correct?
true
> Isn't the issue that the files inside it cannot be used (because they're symlinked?)
Yes.
The issue is that `ca-bundle.pem -> ../../var/lib/ca-certificates/ca-bundle.pem` is no longer accessible: it now resolves to `/etc/var/lib/ca-certificates/ca-bundle.pem` instead of `/var/lib/ca-certificates/ca-bundle.pem`, because the symlink's base directory is now `/etc/.ro780432485/ssl`, not `/etc/ssl`.
> find /var/lib/ca-certificates/
No, that probably doesn't work for other distros.
> Currently it looks like it's just mounting /etc/ssl on /etc/ssl
It is mounting `/etc/ssl` in the parent namespace on `/etc/ssl` in the child namespace.
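For illustration only, here is a self-contained repro of the resolution problem outside RootlessKit — everything under `/tmp/demo` is made up for the demo, and `.ro780432485` just stands in for the random directory name RootlessKit generates:

```bash
# Lay out a fake host /etc and /var the way Tumbleweed does
mkdir -p /tmp/demo/etc/.ro780432485/ssl /tmp/demo/var/lib/ca-certificates
echo dummy > /tmp/demo/var/lib/ca-certificates/ca-bundle.pem
# The Tumbleweed relative symlink, now living under the hidden .ro… copy
ln -s ../../var/lib/ca-certificates/ca-bundle.pem /tmp/demo/etc/.ro780432485/ssl/ca-bundle.pem
# copy-up replaces /etc/ssl with a symlink into that copy
ln -s .ro780432485/ssl /tmp/demo/etc/ssl
# "../.." is resolved against .../etc/.ro780432485/ssl, so this fails with ENOENT
cat /tmp/demo/etc/ssl/ca-bundle.pem
```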
| AkihiroSuda | 4,736 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was facing this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | But what's the `realpath_etc_ssl=$(realpath /etc/ssl)` doing? As there's no symlink to resolve in that case.
So the effective change is that we delete `/etc/ssl` (and the symlinks) and replace it with an empty directory? | thaJeztah | 4,737 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was facing this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | `rootlesskit --copy-up=/etc` mounts an empty tmpfs on `/etc` (to make it writable), mounts the old `/etc` on `/etc/.ro780432485`, and symlinks `/etc/ssl` to `/etc/.ro780432485/ssl`.
So `realpath_etc_ssl` = `/etc/.ro780432485/ssl`.
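A rough sketch of the resulting layout inside the RootlessKit child (the `.ro…` suffix is randomized per run; the name below is only illustrative):

```sh
# inside the child mount namespace created by `rootlesskit --copy-up=/etc`:
#   /etc               - fresh tmpfs (the writable copy-up target)
#   /etc/.ro780432485  - the original /etc from the parent namespace
#   /etc/ssl           - symlink pointing into that read-only copy
realpath /etc/ssl   # => /etc/.ro780432485/ssl
```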
| AkihiroSuda | 4,738 |
moby/moby | 42,457 | rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed |
<!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Fix "x509: certificate signed by unknown authority" error on openSUSE Tumbleweed.
openSUSE Tumbleweed was facing this error because `/etc/ssl/ca-bundle.pem` is provided as a symlink to `../../var/lib/ca-certificates/ca-bundle.pem`, which was not supported by `rootlesskit --copy-up=/etc`.
See rootless-containers/rootlesskit#225
**- How I did it**
By bind-mounting `/etc/ssl` from the parent namespace into the child.
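Condensed, the check added to `contrib/dockerd-rootless.sh` (the full script is in the diff below) is:

```sh
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
	realpath_etc_ssl=$(realpath /etc/ssl)
	rm -f /etc/ssl
	mkdir /etc/ssl
	mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
```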
**- How to verify it**
Run `docker --context=rootless pull hello-world` on an openSUSE Tumbleweed host.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
rootless: fix "x509: certificate signed by unknown authority" on openSUSE Tumbleweed
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-06-03 07:12:31+00:00 | 2021-07-06 09:53:37+00:00 | contrib/dockerd-rootless.sh | #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
exec dockerd $@
fi
| #!/bin/sh
# dockerd-rootless.sh executes dockerd in rootless mode.
#
# Usage: dockerd-rootless.sh [DOCKERD_OPTIONS]
#
# External dependencies:
# * newuidmap and newgidmap needs to be installed.
# * /etc/subuid and /etc/subgid needs to be configured for the current user.
# * Either one of slirp4netns (>= v0.4.0), VPNKit, lxc-user-nic needs to be installed.
#
# Recognized environment variables:
# * DOCKERD_ROOTLESS_ROOTLESSKIT_NET=(slirp4netns|vpnkit|lxc-user-nic): the rootlesskit network driver. Defaults to "slirp4netns" if slirp4netns (>= v0.4.0) is installed. Otherwise defaults to "vpnkit".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_MTU=NUM: the MTU value for the rootlesskit network driver. Defaults to 65520 for slirp4netns, 1500 for other drivers.
# * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns): the rootlesskit port driver. Defaults to "builtin".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
# * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
#
# See the documentation for the further information: https://docs.docker.com/go/rootless/
set -e -x
case "$1" in
"check" | "install" | "uninstall")
echo "Did you mean 'dockerd-rootless-setuptool.sh $@' ?"
exit 1
;;
esac
if ! [ -w $XDG_RUNTIME_DIR ]; then
echo "XDG_RUNTIME_DIR needs to be set and writable"
exit 1
fi
if ! [ -d $HOME ]; then
echo "HOME needs to be set and exist."
exit 1
fi
rootlesskit=""
for f in docker-rootlesskit rootlesskit; do
if command -v $f > /dev/null 2>&1; then
rootlesskit=$f
break
fi
done
if [ -z $rootlesskit ]; then
echo "rootlesskit needs to be installed"
exit 1
fi
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_NET:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_MTU:=}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
: "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
if [ -z $net ]; then
if command -v slirp4netns > /dev/null 2>&1; then
# If --netns-type is present in --help, slirp4netns is >= v0.4.0.
if slirp4netns --help | grep -qw -- --netns-type; then
net=slirp4netns
if [ -z $mtu ]; then
mtu=65520
fi
else
echo "slirp4netns found but seems older than v0.4.0. Falling back to VPNKit."
fi
fi
if [ -z $net ]; then
if command -v vpnkit > /dev/null 2>&1; then
net=vpnkit
else
echo "Either slirp4netns (>= v0.4.0) or vpnkit needs to be installed"
exit 1
fi
fi
fi
if [ -z $mtu ]; then
mtu=1500
fi
if [ -z $_DOCKERD_ROOTLESS_CHILD ]; then
_DOCKERD_ROOTLESS_CHILD=1
export _DOCKERD_ROOTLESS_CHILD
if [ "$(id -u)" = "0" ]; then
echo "This script must be executed as a non-privileged user"
exit 1
fi
# `selinuxenabled` always returns false in RootlessKit child, so we execute `selinuxenabled` in the parent.
# https://github.com/rootless-containers/rootlesskit/issues/94
if command -v selinuxenabled > /dev/null 2>&1 && selinuxenabled; then
_DOCKERD_ROOTLESS_SELINUX=1
export _DOCKERD_ROOTLESS_SELINUX
fi
# Re-exec the script via RootlessKit, so as to create unprivileged {user,mount,network} namespaces.
#
# --copy-up allows removing/creating files in the directories by creating tmpfs and symlinks
# * /etc: copy-up is required so as to prevent `/etc/resolv.conf` in the
# namespace from being unexpectedly unmounted when `/etc/resolv.conf` is recreated on the host
# (by either systemd-networkd or NetworkManager)
# * /run: copy-up is required so that we can create /run/docker (hardcoded for plugins) in our namespace
exec $rootlesskit \
--net=$net --mtu=$mtu \
--slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
--slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
--copy-up=/etc --copy-up=/run \
--propagation=rslave \
$DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
$0 $@
else
[ $_DOCKERD_ROOTLESS_CHILD = 1 ]
# remove the symlinks for the existing files in the parent namespace if any,
# so that we can create our own files in our mount namespace.
rm -f /run/docker /run/containerd /run/xtables.lock
if [ -n "$_DOCKERD_ROOTLESS_SELINUX" ]; then
# iptables requires /run in the child to be relabeled. The actual /run in the parent is unaffected.
# https://github.com/containers/podman/blob/e6fc34b71aa9d876b1218efe90e14f8b912b0603/libpod/networking_linux.go#L396-L401
# https://github.com/moby/moby/issues/41230
chcon system_u:object_r:iptables_var_run_t:s0 /run
fi
if [ "$(stat -c %T -f /etc)" = "tmpfs" ] && [ -L "/etc/ssl" ]; then
# Workaround for "x509: certificate signed by unknown authority" on openSUSE Tumbleweed.
# https://github.com/rootless-containers/rootlesskit/issues/225
realpath_etc_ssl=$(realpath /etc/ssl)
rm -f /etc/ssl
mkdir /etc/ssl
mount --rbind ${realpath_etc_ssl} /etc/ssl
fi
exec dockerd $@
fi
| AkihiroSuda | 2b5427b85371633c94d6c3ed3ae769d67c79f500 | 6d88407ac299b10bbf9e01e1dc5a66895d1564a2 | Ahhhhhh! That's the bit I was missing! Sorry for the confusion 🤗 ❤️ | thaJeztah | 4,739 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/lcow_unsupported.go | // +build !windows windows,no_lcow
package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(_ bool) {}
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// ValidatePlatform determines if a platform structure is valid. This function
// is used for LCOW, and is a no-op on non-windows platforms.
func ValidatePlatform(_ specs.Platform) error {
return nil
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
)
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Is this still used somewhere, or should this whole file likely be going away? | tianon | 4,740 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/lcow_unsupported.go | // +build !windows windows,no_lcow
package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(_ bool) {}
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// ValidatePlatform determines if a platform structure is valid. This function
// is used for LCOW, and is a no-op on non-windows platforms.
func ValidatePlatform(_ specs.Platform) error {
return nil
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
)
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Let me double check if it's still called in this diff. Once everything is done, it should definitely be gone.
I mostly opened this PR (smaller subset of https://github.com/moby/moby/pull/42170) for my own sanity, as the LCOW code is in so many places that "chipping it off" in smaller bits made it somewhat easier both for me and for reviewers. 😓
| thaJeztah | 4,741 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/lcow_unsupported.go | // +build !windows windows,no_lcow
package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(_ bool) {}
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// ValidatePlatform determines if a platform structure is valid. This function
// is used for LCOW, and is a no-op on non-windows platforms.
func ValidatePlatform(_ specs.Platform) error {
return nil
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
)
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Just checked, and `LCOWSupported()` is still used in various places (but now of course always `false`) | thaJeztah | 4,742 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/lcow_unsupported.go | // +build !windows windows,no_lcow
package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(_ bool) {}
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// ValidatePlatform determines if a platform structure is valid. This function
// is used for LCOW, and is a no-op on non-windows platforms.
func ValidatePlatform(_ specs.Platform) error {
return nil
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"runtime"
"strings"
)
// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
return false
}
// IsOSSupported determines if an operating system is supported by the host.
func IsOSSupported(os string) bool {
return strings.EqualFold(runtime.GOOS, os)
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Sounds good! 👍 | tianon | 4,743 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/path_windows.go | package system // import "github.com/docker/docker/pkg/system"
import "golang.org/x/sys/windows"
// GetLongPathName converts Windows short pathnames to full pathnames.
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
// It is a no-op on non-Windows platforms
func GetLongPathName(path string) (string, error) {
// See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
p, err := windows.UTF16FromString(path)
if err != nil {
return "", err
}
b := p // GetLongPathName says we can reuse buffer
n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
if n > uint32(len(b)) {
b = make([]uint16, n)
_, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
}
return windows.UTF16ToString(b), nil
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"fmt"
"path/filepath"
"strings"
"golang.org/x/sys/windows"
)
// GetLongPathName converts Windows short pathnames to full pathnames.
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
// It is a no-op on non-Windows platforms
func GetLongPathName(path string) (string, error) {
// See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
p, err := windows.UTF16FromString(path)
if err != nil {
return "", err
}
b := p // GetLongPathName says we can reuse buffer
n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
if n > uint32(len(b)) {
b = make([]uint16, n)
_, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
}
return windows.UTF16ToString(b), nil
}
// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
// of CheckSystemDriveAndRemoveDriveLetter
func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
return "", fmt.Errorf("No relative path specified in %q", path)
}
if !driver.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Can we just export this as CheckSystemDriveAndRemoveDriveLetter? | AkihiroSuda | 4,744 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | pkg/system/path_windows.go | package system // import "github.com/docker/docker/pkg/system"
import "golang.org/x/sys/windows"
// GetLongPathName converts Windows short pathnames to full pathnames.
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
// It is a no-op on non-Windows platforms
func GetLongPathName(path string) (string, error) {
// See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
p, err := windows.UTF16FromString(path)
if err != nil {
return "", err
}
b := p // GetLongPathName says we can reuse buffer
n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
if n > uint32(len(b)) {
b = make([]uint16, n)
_, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
}
return windows.UTF16ToString(b), nil
}
| package system // import "github.com/docker/docker/pkg/system"
import (
"fmt"
"path/filepath"
"strings"
"golang.org/x/sys/windows"
)
// GetLongPathName converts Windows short pathnames to full pathnames.
// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
// It is a no-op on non-Windows platforms
func GetLongPathName(path string) (string, error) {
// See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg
p, err := windows.UTF16FromString(path)
if err != nil {
return "", err
}
b := p // GetLongPathName says we can reuse buffer
n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
if n > uint32(len(b)) {
b = make([]uint16, n)
_, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b)))
if err != nil {
return "", err
}
}
return windows.UTF16ToString(b), nil
}
// checkSystemDriveAndRemoveDriveLetter is the Windows implementation
// of CheckSystemDriveAndRemoveDriveLetter
func checkSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
if len(path) == 2 && string(path[1]) == ":" {
return "", fmt.Errorf("No relative path specified in %q", path)
}
if !driver.IsAbs(path) || len(path) < 2 {
return filepath.FromSlash(path), nil
}
if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
return "", fmt.Errorf("The specified path is not on the system drive (C:)")
}
return filepath.FromSlash(path[2:]), nil
}
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | We could, but the downside is we'll have to maintain GoDoc for both Windows and Linux separately, plus making sure that the signatures are in sync; having the "exported" version in a shared file, and the implementations separate makes sure we don't diverge. | thaJeztah | 4,745 |
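For illustration, the split described in the comment above would look roughly like this (a sketch only; the file name, GoDoc wording, and the Unix counterpart are assumptions, while `PathVerifier` and `checkSystemDriveAndRemoveDriveLetter` come from the diff above):

```go
// pkg/system/path.go (shared, platform-independent entry point)
package system

// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a
// drive letter, is on the system drive, and returns the path with the drive
// letter stripped. The exported function and its GoDoc live here once;
// path_windows.go and path_unix.go each provide the unexported implementation.
func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
	return checkSystemDriveAndRemoveDriveLetter(path, driver)
}
```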
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### LCOW (Linux Containers On Windows)
LCOW is an experimental feature on Windows, and requires the daemon to run with
experimental features enabled. Use the `no_lcow` build tag to disable the LCOW
feature at compile time.
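Following the same pattern as the other build tags above, that would be:
```bash
export DOCKER_BUILDTAGS='no_lcow'
```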
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a Linux binary targeting ARMv7.
See `hack/make/.binary` for supported cross compilation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
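For example (the platform list here is only illustrative):
```
make DOCKER_CROSSPLATFORMS="linux/arm/v7 linux/arm64" cross
```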
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a Linux binary targeting ARMv7.
See `hack/make/.binary` for supported cross compilation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | (Off-topic: PACKAGERS.md seems completely outdated 😅) | AkihiroSuda | 4,746 |
moby/moby | 42,451 | Remove LCOW (step 1) | taken from https://github.com/moby/moby/pull/42170, but splitting into smaller chunks
The LCOW implementation in dockerd has been deprecated in favor of re-implementation
in containerd (in progress). Microsoft started removing the LCOW V1 code from the
build dependencies we use in Microsoft/opengcs (soon to be part of Microsoft/hcsshim),
which means that we need to start removing this code.
This first step removes the lcow graphdriver, the LCOW initialization code, and
some LCOW-related utilities.
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-06-01 09:49:38+00:00 | 2021-06-08 20:41:45+00:00 | project/PACKAGERS.md | # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### LCOW (Linux Containers On Windows)
LCOW is an experimental feature on Windows, and requires the daemon to run with
experimental features enabled. Use the `no_lcow` build tag to disable the LCOW
feature at compile time.
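For example, following the same `DOCKER_BUILDTAGS` pattern shown above:
```bash
export DOCKER_BUILDTAGS='no_lcow'
```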
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a linux binary targeting arm 7.
See `hack/make/.binary` for supported cross compilation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| # Dear Packager,
If you are looking to make Docker available on your favorite software
distribution, this document is for you. It summarizes the requirements for
building and running the Docker client and the Docker daemon.
## Getting Started
We want to help you package Docker successfully. Before doing any packaging, a
good first step is to introduce yourself on the [docker-dev mailing
list](https://groups.google.com/d/forum/docker-dev), explain what you're trying
to achieve, and tell us how we can help. Don't worry, we don't bite! There might
even be someone already working on packaging for the same distro!
You can also join the IRC channel - #docker and #docker-dev on Freenode are both
active and friendly.
We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our
"Packagers Relations", since he's always working to make sure our packagers have
a good, healthy upstream to work with (both in our communication and in our
build scripts). If you're having any kind of trouble, feel free to ping him
directly. He also likes to keep track of what distributions we have packagers
for, so feel free to reach out to him even just to say "Hi!"
## Package Name
If possible, your package should be called "docker". If that name is already
taken, a second choice is "docker-engine". Another possible choice is "docker.io".
## Official Build vs Distro Build
The Docker project maintains its own build and release toolchain. It is pretty
neat and entirely based on Docker (surprise!). This toolchain is the canonical
way to build Docker. We encourage you to give it a try, and if the circumstances
allow you to use it, we recommend that you do.
You might not be able to use the official build toolchain - usually because your
distribution has a toolchain and packaging policy of its own. We get it! Your
house, your rules. The rest of this document should give you the information you
need to package Docker your way, without denaturing it in the process.
## Build Dependencies
To build Docker, you will need the following:
* A recent version of Git and Mercurial
* Go version 1.6 or later
* A clean checkout of the source added to a valid [Go
workspace](https://golang.org/doc/code.html#Workspaces) under the path
*src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`,
explained in more detail below)
To build the Docker daemon, you will additionally need:
* An amd64/x86_64 machine running Linux
* SQLite version 3.7.9 or later
* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version
2.02.89 or later
* btrfs-progs version 3.16.1 or later (unless using an older version is
absolutely necessary, in which case 3.8 is the minimum)
* libseccomp version 2.2.1 or later (for build tag seccomp)
Be sure to also check out Docker's Dockerfile for the most up-to-date list of
these build-time dependencies.
### Go Dependencies
All Go dependencies are vendored under "./vendor". They are used by the official
build, so the source of truth for the current version of each dependency is
whatever is in "./vendor".
To use the vendored dependencies, simply make sure the path to "./vendor" is
included in `GOPATH` (or use `AUTO_GOPATH`, as explained below).
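As a rough sketch (the `AUTO_GOPATH` route described below is the supported way to do this, and the exact layout the build scripts expect may differ):
```bash
export GOPATH="${PWD}/vendor:${GOPATH}"
```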
If you would rather (or must, due to distro policy) package these dependencies
yourself, take a look at "vendor.conf" for an easy-to-parse list of the
exact version for each.
NOTE: if you're not able to package the exact version (to the exact commit) of a
given dependency, please get in touch so we can remediate! Who knows what
discrepancies can be caused by even the slightest deviation. We promise to do
our best to make everybody happy.
## Stripping Binaries
Please, please, please do not strip any compiled binaries. This is really
important.
In our own testing, stripping the resulting binaries sometimes results in a
binary that appears to work, but more often causes random panics, segfaults, and
other issues. Even if the binary appears to work, please don't strip.
See the following quotes from Dave Cheney, which explain this position better
from the upstream Golang perspective.
### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3)
> Super super important: Do not strip go binaries or archives. It isn't tested,
> often breaks, and doesn't work.
### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8)
> To quote myself: "Please do not strip Go binaries, it is not supported, not
> tested, is often broken, and doesn't do what you want"
>
> To unpack that a bit
>
> * not supported, as in, we don't support it, and recommend against it when
> asked
> * not tested, we don't test stripped binaries as part of the build CI process
> * is often broken, stripping a go binary will produce anywhere from no, to
> subtle, to outright execution failure, see above
### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13)
> To clarify my previous statements.
>
> * I do not disagree with the debian policy, it is there for a good reason
> * Having said that, it stripping Go binaries doesn't work, and nobody is
> looking at making it work, so there is that.
>
> Thanks for patching the build formula.
## Building Docker
Please use our build script ("./hack/make.sh") for all your compilation of
Docker. If there's something you need that it isn't doing, or something it could
be doing to make your life as a packager easier, please get in touch with Tianon
and help us rectify the situation. Chances are good that other packagers have
probably run into the same problems and a fix might already be in the works, but
none of us will know for sure unless you harass Tianon about it. :)
All the commands listed within this section should be run with the Docker source
checkout as the current working directory.
### `AUTO_GOPATH`
If you'd rather not be bothered with the hassles that setting up `GOPATH`
appropriately can be, and prefer to just get a "build that works", you should
add something similar to this to whatever script or process you're using to
build Docker:
```bash
export AUTO_GOPATH=1
```
This will cause the build scripts to set up a reasonable `GOPATH` that
automatically and properly includes both docker/docker from the local
directory, and the local "./vendor" directory as necessary.
### `DOCKER_BUILDTAGS`
If you're building a binary that may need to be used on platforms that include
AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows:
```bash
export DOCKER_BUILDTAGS='apparmor'
```
If you're building a binary that may need to be used on platforms that include
seccomp, you will need to use the `seccomp` build tag:
```bash
export DOCKER_BUILDTAGS='seccomp'
```
There are build tags for disabling graphdrivers as well. By default, support
for all graphdrivers is built in.
To disable btrfs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs'
```
To disable devicemapper:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper'
```
To disable aufs:
```bash
export DOCKER_BUILDTAGS='exclude_graphdriver_aufs'
```
NOTE: if you need to set more than one build tag, space separate them:
```bash
export DOCKER_BUILDTAGS='apparmor exclude_graphdriver_aufs'
```
### Static Daemon
If it is feasible within the constraints of your distribution, you should
seriously consider packaging Docker as a single static binary. A good comparison
is Busybox, which is often packaged statically as a feature to enable mass
portability. Because of the unique way Docker operates, being similarly static
is a "feature".
To build a static Docker daemon binary, run the following command (first
ensuring that all the necessary libraries are available in static form for
linking - see the "Build Dependencies" section above, and the relevant lines
within Docker's own Dockerfile that set up our official build environment):
```bash
./hack/make.sh binary
```
This will create a static binary under
"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of
the file "./VERSION". This binary is usually installed somewhere like
"/usr/bin/docker".
### Dynamic Daemon / Client-only Binary
If you are only interested in a Docker client binary, you can build using:
```bash
./hack/make.sh binary-client
```
If you need to (due to distro policy, distro library availability, or for other
reasons) create a dynamically compiled daemon binary, or if you are only
interested in creating a client binary for Docker, use something similar to the
following:
```bash
./hack/make.sh dynbinary-client
```
This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for
client-only builds is the important file to grab and install as appropriate.
### Cross Compilation
Limited cross compilation is supported due to requiring cgo for critical
functionality (such as seccomp support).
To cross compile run `make cross`. You can specify the platforms to target by
setting the `DOCKER_CROSSPLATFORMS` environment variable to a list of platforms
in the format `<GOOS>/<GOARCH>`. Specify multiple platforms by using a space
in between each desired platform.
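For instance, to target several platforms in one invocation (the platform list here is only illustrative; check `hack/make/.binary` for what is actually supported):
```
make DOCKER_CROSSPLATFORMS='linux/amd64 linux/arm/v7' cross
```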
For setting arm variants, you can specify the `GOARM` value by appending `/v<GOARM>`
to your `<GOOS>/arm`. Example:
```
make DOCKER_CROSSPLATFORMS=linux/arm/v7 cross
```
This will create a linux binary targeting arm 7.
See `hack/make/.binary` for supported cross compilation platforms.
## System Dependencies
### Runtime Dependencies
To function properly, the Docker daemon needs the following software to be
installed and available at runtime:
* iptables version 1.4 or later
* procps (or similar provider of a "ps" executable)
* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs)
* xfsprogs (in use: mkfs.xfs)
* XZ Utils version 4.9 or later
* a [properly
mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount)
cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point
[is](https://github.com/docker/docker/issues/2683)
[not](https://github.com/docker/docker/issues/3485)
[sufficient](https://github.com/docker/docker/issues/4568))
Additionally, the Docker client needs the following software to be installed and
available at runtime:
* Git version 1.7 or later
### Kernel Requirements
The Docker daemon has very specific kernel requirements. Most pre-packaged
kernels already include the necessary options enabled. If you are building your
own kernel, you will either need to discover the options necessary via trial and
error, or check out the [Gentoo
ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild),
in which a list is maintained (and if there are any issues or discrepancies in
that list, please contact Tianon so they can be rectified).
Note that in client mode, there are no specific kernel requirements, and that
the client will even run on alternative platforms such as Mac OS X / Darwin.
### Optional Dependencies
Some of Docker's features are activated by using optional command-line flags or
by having support for them in the kernel or userspace. A few examples include:
* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at
least the "auplink" utility from aufs-tools)
* BTRFS graph driver (requires BTRFS support enabled in the kernel)
* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module)
* Libseccomp to allow running seccomp profiles with containers
## Daemon Init Script
Docker expects to run as a daemon at machine startup. Your package will need to
include a script for your distro's process supervisor of choice. Be sure to
check out the "contrib/init" folder in case a suitable init script already
exists (and if one does not, contact Tianon about whether it might be
appropriate for your distro's init script to live there too!).
In general, Docker should be run as root, similar to the following:
```bash
dockerd
```
Generally, a `DOCKER_OPTS` variable of some kind is available for adding more
flags (such as changing the graph driver to use BTRFS, switching the location of
"/var/lib/docker", etc).
## Communicate
As a final note, please do feel free to reach out to Tianon at any time for
pretty much anything. He really does love hearing from our packagers and wants
to make sure we're not being a "hostile upstream". As should be a given, we
appreciate the work our packagers do to make sure we have broad distribution!
| thaJeztah | 0ad2293d0e5bbf4c966a0e8b27c3ac3835265577 | a7ea29a5a6dcf65a2f4fff0be9becc2f7c587b4e | Yes, it needs to be cleaned up | thaJeztah | 4,747 |
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | daemon/logger/journald/read.go | // +build linux,cgo,!static_build,journald
package journald // import "github.com/docker/docker/daemon/logger/journald"
// #include <sys/types.h>
// #include <sys/poll.h>
// #include <systemd/sd-journal.h>
// #include <errno.h>
// #include <stdio.h>
// #include <stdlib.h>
// #include <string.h>
// #include <time.h>
// #include <unistd.h>
//
//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial)
//{
// int rc;
// size_t plength;
// *msg = NULL;
// *length = 0;
// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true");
// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length);
// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0));
// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
// if (rc == 0) {
// if (*length > 8) {
// (*msg) += 8;
// *length -= 8;
// } else {
// *msg = NULL;
// *length = 0;
// rc = -ENOENT;
// }
// }
// return rc;
//}
//static int get_priority(sd_journal *j, int *priority)
//{
// const void *data;
// size_t i, length;
// int rc;
// *priority = -1;
// rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
// if (rc == 0) {
// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
// *priority = 0;
// for (i = 9; i < length; i++) {
// *priority = *priority * 10 + ((const char *)data)[i] - '0';
// }
// if (length > 9) {
// rc = 0;
// }
// }
// }
// return rc;
//}
//static int is_attribute_field(const char *msg, size_t length)
//{
// static const struct known_field {
// const char *name;
// size_t length;
// } fields[] = {
// {"MESSAGE", sizeof("MESSAGE") - 1},
// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1},
// {"PRIORITY", sizeof("PRIORITY") - 1},
// {"CODE_FILE", sizeof("CODE_FILE") - 1},
// {"CODE_LINE", sizeof("CODE_LINE") - 1},
// {"CODE_FUNC", sizeof("CODE_FUNC") - 1},
// {"ERRNO", sizeof("ERRNO") - 1},
// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1},
// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1},
// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1},
// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1},
// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1},
// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1},
// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1},
// };
// unsigned int i;
// void *p;
// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) {
// return -1;
// }
// length = ((const char *) p) - msg;
// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) {
// return -1;
// }
// }
// return 0;
//}
//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length)
//{
// int rc;
// *msg = NULL;
// *length = 0;
// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) {
// if (is_attribute_field(*msg, *length) == 0) {
// break;
// }
// rc = -ENOENT;
// }
// return rc;
//}
import "C"
import (
"errors"
"strings"
"time"
"unsafe"
"github.com/coreos/go-systemd/v22/journal"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/daemon/logger"
"github.com/sirupsen/logrus"
)
func (s *journald) Close() error {
s.mu.Lock()
for r := range s.readers {
r.ProducerGone()
delete(s.readers, r)
}
s.mu.Unlock()
return nil
}
// convert error code returned from a sd_journal_* function
// (which returns -errno) to a string
func CErr(ret C.int) string {
return C.GoString(C.strerror(C.int(-ret)))
}
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
var (
msg, data, cursor *C.char
length C.size_t
stamp C.uint64_t
priority, partial C.int
done bool
shown int
)
// Walk the journal from here forward until we run out of new entries
// or we reach the until value (if provided).
drain:
for {
// Try not to send a given entry twice.
if oldCursor != nil {
for C.sd_journal_test_cursor(j, oldCursor) > 0 {
if C.sd_journal_next(j) <= 0 {
break drain
}
}
}
// Read and send the logged message, if there is one to read.
i := C.get_message(j, &msg, &length, &partial)
if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
// Read the entry's timestamp.
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
break
}
// Break if the timestamp exceeds any provided until flag.
if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) {
done = true
break
}
// Set up the time and text of the entry.
timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
line := C.GoBytes(unsafe.Pointer(msg), C.int(length))
if partial == 0 {
line = append(line, "\n"...)
}
// Recover the stream name by mapping
// from the journal priority back to
// the stream that we would have
// assigned that value.
source := ""
if C.get_priority(j, &priority) == 0 {
if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
}
}
// Retrieve the values of any variables we're adding to the journal.
var attrs []backend.LogAttr
C.sd_journal_restart_data(j)
for C.get_attribute_field(j, &data, &length) > C.int(0) {
kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
}
// Send the log message, unless the consumer is gone
select {
case <-logWatcher.WatchConsumerGone():
done = true // we won't be able to write anything anymore
break drain
case logWatcher.Msg <- &logger.Message{
Line: line,
Source: source,
Timestamp: timestamp.In(time.UTC),
Attrs: attrs,
}:
shown++
}
// Call sd_journal_process() periodically during the processing loop
// to close any opened file descriptors for rotated (deleted) journal files.
if shown%1024 == 0 {
if ret := C.sd_journal_process(j); ret < 0 {
// log a warning but ignore it for now
logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
WithField("error", CErr(ret)).
Warn("journald: error processing journal")
}
}
}
// If we're at the end of the journal, we're done (for now).
if C.sd_journal_next(j) <= 0 {
break
}
}
// free(NULL) is safe
C.free(unsafe.Pointer(oldCursor))
if C.sd_journal_get_cursor(j, &cursor) != 0 {
// ensure that we won't be freeing an address that's invalid
cursor = nil
}
return cursor, done, shown
}
func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
s.mu.Lock()
s.readers[logWatcher] = struct{}{}
s.mu.Unlock()
waitTimeout := C.uint64_t(250000) // 0.25s
for {
status := C.sd_journal_wait(j, waitTimeout)
if status < 0 {
logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
goto cleanup
}
select {
case <-logWatcher.WatchConsumerGone():
goto cleanup // won't be able to write anything anymore
case <-logWatcher.WatchProducerGone():
// container is gone, drain journal
default:
// container is still alive
if status == C.SD_JOURNAL_NOP {
// no new data -- keep waiting
continue
}
}
newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
cursor = newCursor
if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
break
}
}
cleanup:
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
return cursor
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
var (
j *C.sd_journal
cmatch, cursor *C.char
stamp C.uint64_t
sinceUnixMicro uint64
untilUnixMicro uint64
)
// Get a handle to the journal.
if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
close(logWatcher.Msg)
return
}
if config.Follow {
// Initialize library inotify watches early
if rc := C.sd_journal_get_fd(j); rc < 0 {
logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
close(logWatcher.Msg)
return
}
}
// If we end up following the log, we can set the journal context
// pointer and the channel pointer to nil so that we won't close them
// here, potentially while the goroutine that uses them is still
// running. Otherwise, close them when we return from this function.
following := false
defer func() {
if !following {
close(logWatcher.Msg)
}
C.sd_journal_close(j)
}()
// Remove limits on the size of data items that we'll retrieve.
if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
return
}
// Add a match to have the library do the searching for us.
cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
defer C.free(unsafe.Pointer(cmatch))
if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
return
}
// If we have a cutoff time, convert it to Unix time once.
if !config.Since.IsZero() {
nano := config.Since.UnixNano()
sinceUnixMicro = uint64(nano / 1000)
}
// If we have an until value, convert it too
if !config.Until.IsZero() {
nano := config.Until.UnixNano()
untilUnixMicro = uint64(nano / 1000)
}
if config.Tail >= 0 {
// If until time provided, start from there.
// Otherwise start at the end of the journal.
if untilUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
return
}
} else if rc := C.sd_journal_seek_tail(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
return
}
// (Try to) skip backwards by the requested number of lines...
if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 {
// ...but not before "since"
if sinceUnixMicro != 0 &&
C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
uint64(stamp) < sinceUnixMicro {
C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
}
}
} else {
// Start at the beginning of the journal.
if rc := C.sd_journal_seek_head(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
return
}
// If we have a cutoff date, fast-forward to it.
if sinceUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
return
}
}
if rc := C.sd_journal_next(j); rc < 0 {
logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
return
}
}
if config.Tail != 0 { // special case for --tail 0
cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
}
if config.Follow {
cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
// Let followJournal handle freeing the journal context
// object and closing the channel.
following = true
}
C.free(unsafe.Pointer(cursor))
return
}
func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
logWatcher := logger.NewLogWatcher()
go s.readLogs(logWatcher, config)
return logWatcher
}
| // +build linux,cgo,!static_build,journald
package journald // import "github.com/docker/docker/daemon/logger/journald"
// #include <sys/types.h>
// #include <sys/poll.h>
// #include <systemd/sd-journal.h>
// #include <errno.h>
// #include <stdio.h>
// #include <stdlib.h>
// #include <string.h>
// #include <time.h>
// #include <unistd.h>
//
//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial)
//{
// int rc;
// size_t plength;
// *msg = NULL;
// *length = 0;
// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true");
// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length);
// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0));
// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
// if (rc == 0) {
// if (*length > 8) {
// (*msg) += 8;
// *length -= 8;
// } else {
// *msg = NULL;
// *length = 0;
// rc = -ENOENT;
// }
// }
// return rc;
//}
//static int get_priority(sd_journal *j, int *priority)
//{
// const void *data;
// size_t i, length;
// int rc;
// *priority = -1;
// rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
// if (rc == 0) {
// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
// *priority = 0;
// for (i = 9; i < length; i++) {
// *priority = *priority * 10 + ((const char *)data)[i] - '0';
// }
// if (length > 9) {
// rc = 0;
// }
// }
// }
// return rc;
//}
//static int is_attribute_field(const char *msg, size_t length)
//{
// static const struct known_field {
// const char *name;
// size_t length;
// } fields[] = {
// {"MESSAGE", sizeof("MESSAGE") - 1},
// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1},
// {"PRIORITY", sizeof("PRIORITY") - 1},
// {"CODE_FILE", sizeof("CODE_FILE") - 1},
// {"CODE_LINE", sizeof("CODE_LINE") - 1},
// {"CODE_FUNC", sizeof("CODE_FUNC") - 1},
// {"ERRNO", sizeof("ERRNO") - 1},
// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1},
// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1},
// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1},
// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1},
// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1},
// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1},
// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1},
// };
// unsigned int i;
// void *p;
// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) {
// return -1;
// }
// length = ((const char *) p) - msg;
// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) {
// return -1;
// }
// }
// return 0;
//}
//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length)
//{
// int rc;
// *msg = NULL;
// *length = 0;
// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) {
// if (is_attribute_field(*msg, *length) == 0) {
// break;
// }
// rc = -ENOENT;
// }
// return rc;
//}
import "C"
import (
"errors"
"strings"
"time"
"unsafe"
"github.com/coreos/go-systemd/v22/journal"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/daemon/logger"
"github.com/sirupsen/logrus"
)
func (s *journald) Close() error {
s.mu.Lock()
for r := range s.readers {
r.ProducerGone()
delete(s.readers, r)
}
s.mu.Unlock()
return nil
}
// CErr converts error code returned from a sd_journal_* function
// (which returns -errno) to a string
func CErr(ret C.int) string {
return C.GoString(C.strerror(-ret))
}
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
var (
msg, data, cursor *C.char
length C.size_t
stamp C.uint64_t
priority, partial C.int
done bool
shown int
)
// Walk the journal from here forward until we run out of new entries
// or we reach the until value (if provided).
drain:
for {
// Try not to send a given entry twice.
if oldCursor != nil {
for C.sd_journal_test_cursor(j, oldCursor) > 0 {
if C.sd_journal_next(j) <= 0 {
break drain
}
}
}
// Read and send the logged message, if there is one to read.
i := C.get_message(j, &msg, &length, &partial)
if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
// Read the entry's timestamp.
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
break
}
// Break if the timestamp exceeds any provided until flag.
if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) {
done = true
break
}
// Set up the time and text of the entry.
timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
line := C.GoBytes(unsafe.Pointer(msg), C.int(length))
if partial == 0 {
line = append(line, "\n"...)
}
// Recover the stream name by mapping
// from the journal priority back to
// the stream that we would have
// assigned that value.
source := ""
if C.get_priority(j, &priority) == 0 {
if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
}
}
// Retrieve the values of any variables we're adding to the journal.
var attrs []backend.LogAttr
C.sd_journal_restart_data(j)
for C.get_attribute_field(j, &data, &length) > C.int(0) {
kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
}
// Send the log message, unless the consumer is gone
select {
case <-logWatcher.WatchConsumerGone():
done = true // we won't be able to write anything anymore
break drain
case logWatcher.Msg <- &logger.Message{
Line: line,
Source: source,
Timestamp: timestamp.In(time.UTC),
Attrs: attrs,
}:
shown++
}
// Call sd_journal_process() periodically during the processing loop
// to close any opened file descriptors for rotated (deleted) journal files.
if shown%1024 == 0 {
if ret := C.sd_journal_process(j); ret < 0 {
// log a warning but ignore it for now
logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
WithField("error", CErr(ret)).
Warn("journald: error processing journal")
}
}
}
// If we're at the end of the journal, we're done (for now).
if C.sd_journal_next(j) <= 0 {
break
}
}
// free(NULL) is safe
C.free(unsafe.Pointer(oldCursor))
if C.sd_journal_get_cursor(j, &cursor) != 0 {
// ensure that we won't be freeing an address that's invalid
cursor = nil
}
return cursor, done, shown
}
func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
s.mu.Lock()
s.readers[logWatcher] = struct{}{}
s.mu.Unlock()
waitTimeout := C.uint64_t(250000) // 0.25s
for {
status := C.sd_journal_wait(j, waitTimeout)
if status < 0 {
logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
goto cleanup
}
select {
case <-logWatcher.WatchConsumerGone():
goto cleanup // won't be able to write anything anymore
case <-logWatcher.WatchProducerGone():
// container is gone, drain journal
default:
// container is still alive
if status == C.SD_JOURNAL_NOP {
// no new data -- keep waiting
continue
}
}
newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
cursor = newCursor
if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
break
}
}
cleanup:
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
return cursor
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
var (
j *C.sd_journal
cmatch, cursor *C.char
stamp C.uint64_t
sinceUnixMicro uint64
untilUnixMicro uint64
)
// Get a handle to the journal.
if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
close(logWatcher.Msg)
return
}
if config.Follow {
// Initialize library inotify watches early
if rc := C.sd_journal_get_fd(j); rc < 0 {
logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
close(logWatcher.Msg)
return
}
}
// If we end up following the log, we can set the journal context
// pointer and the channel pointer to nil so that we won't close them
// here, potentially while the goroutine that uses them is still
// running. Otherwise, close them when we return from this function.
following := false
defer func() {
if !following {
close(logWatcher.Msg)
}
C.sd_journal_close(j)
}()
// Remove limits on the size of data items that we'll retrieve.
if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
return
}
// Add a match to have the library do the searching for us.
cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
defer C.free(unsafe.Pointer(cmatch))
if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
return
}
// If we have a cutoff time, convert it to Unix time once.
if !config.Since.IsZero() {
nano := config.Since.UnixNano()
sinceUnixMicro = uint64(nano / 1000)
}
// If we have an until value, convert it too
if !config.Until.IsZero() {
nano := config.Until.UnixNano()
untilUnixMicro = uint64(nano / 1000)
}
if config.Tail >= 0 {
// If until time provided, start from there.
// Otherwise start at the end of the journal.
if untilUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
return
}
} else if rc := C.sd_journal_seek_tail(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
return
}
// (Try to) skip backwards by the requested number of lines...
if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 {
// ...but not before "since"
if sinceUnixMicro != 0 &&
C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
uint64(stamp) < sinceUnixMicro {
C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
}
}
} else {
// Start at the beginning of the journal.
if rc := C.sd_journal_seek_head(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
return
}
// If we have a cutoff date, fast-forward to it.
if sinceUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
return
}
}
if rc := C.sd_journal_next(j); rc < 0 {
logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
return
}
}
if config.Tail != 0 { // special case for --tail 0
cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
}
if config.Follow {
cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
// Let followJournal handle freeing the journal context
// object and closing the channel.
following = true
}
C.free(unsafe.Pointer(cursor))
}
func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
logWatcher := logger.NewLogWatcher()
go s.readLogs(logWatcher, config)
return logWatcher
}
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | Why wouldn't we write `C.GoString(C.strerror(-ret))`? `ret` is already a `C.int`. | samuelkarp | 4,748 |
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | daemon/logger/journald/read.go | // +build linux,cgo,!static_build,journald
package journald // import "github.com/docker/docker/daemon/logger/journald"
// #include <sys/types.h>
// #include <sys/poll.h>
// #include <systemd/sd-journal.h>
// #include <errno.h>
// #include <stdio.h>
// #include <stdlib.h>
// #include <string.h>
// #include <time.h>
// #include <unistd.h>
//
//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial)
//{
// int rc;
// size_t plength;
// *msg = NULL;
// *length = 0;
// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true");
// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length);
// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0));
// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
// if (rc == 0) {
// if (*length > 8) {
// (*msg) += 8;
// *length -= 8;
// } else {
// *msg = NULL;
// *length = 0;
// rc = -ENOENT;
// }
// }
// return rc;
//}
//static int get_priority(sd_journal *j, int *priority)
//{
// const void *data;
// size_t i, length;
// int rc;
// *priority = -1;
// rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
// if (rc == 0) {
// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
// *priority = 0;
// for (i = 9; i < length; i++) {
// *priority = *priority * 10 + ((const char *)data)[i] - '0';
// }
// if (length > 9) {
// rc = 0;
// }
// }
// }
// return rc;
//}
//static int is_attribute_field(const char *msg, size_t length)
//{
// static const struct known_field {
// const char *name;
// size_t length;
// } fields[] = {
// {"MESSAGE", sizeof("MESSAGE") - 1},
// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1},
// {"PRIORITY", sizeof("PRIORITY") - 1},
// {"CODE_FILE", sizeof("CODE_FILE") - 1},
// {"CODE_LINE", sizeof("CODE_LINE") - 1},
// {"CODE_FUNC", sizeof("CODE_FUNC") - 1},
// {"ERRNO", sizeof("ERRNO") - 1},
// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1},
// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1},
// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1},
// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1},
// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1},
// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1},
// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1},
// };
// unsigned int i;
// void *p;
// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) {
// return -1;
// }
// length = ((const char *) p) - msg;
// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) {
// return -1;
// }
// }
// return 0;
//}
//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length)
//{
// int rc;
// *msg = NULL;
// *length = 0;
// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) {
// if (is_attribute_field(*msg, *length) == 0) {
// break;
// }
// rc = -ENOENT;
// }
// return rc;
//}
import "C"
import (
"errors"
"strings"
"time"
"unsafe"
"github.com/coreos/go-systemd/v22/journal"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/daemon/logger"
"github.com/sirupsen/logrus"
)
func (s *journald) Close() error {
s.mu.Lock()
for r := range s.readers {
r.ProducerGone()
delete(s.readers, r)
}
s.mu.Unlock()
return nil
}
// convert error code returned from a sd_journal_* function
// (which returns -errno) to a string
func CErr(ret C.int) string {
return C.GoString(C.strerror(C.int(-ret)))
}
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
var (
msg, data, cursor *C.char
length C.size_t
stamp C.uint64_t
priority, partial C.int
done bool
shown int
)
// Walk the journal from here forward until we run out of new entries
// or we reach the until value (if provided).
drain:
for {
// Try not to send a given entry twice.
if oldCursor != nil {
for C.sd_journal_test_cursor(j, oldCursor) > 0 {
if C.sd_journal_next(j) <= 0 {
break drain
}
}
}
// Read and send the logged message, if there is one to read.
i := C.get_message(j, &msg, &length, &partial)
if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
// Read the entry's timestamp.
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
break
}
// Break if the timestamp exceeds any provided until flag.
if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) {
done = true
break
}
// Set up the time and text of the entry.
timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
line := C.GoBytes(unsafe.Pointer(msg), C.int(length))
if partial == 0 {
line = append(line, "\n"...)
}
// Recover the stream name by mapping
// from the journal priority back to
// the stream that we would have
// assigned that value.
source := ""
if C.get_priority(j, &priority) == 0 {
if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
}
}
// Retrieve the values of any variables we're adding to the journal.
var attrs []backend.LogAttr
C.sd_journal_restart_data(j)
for C.get_attribute_field(j, &data, &length) > C.int(0) {
kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
}
// Send the log message, unless the consumer is gone
select {
case <-logWatcher.WatchConsumerGone():
done = true // we won't be able to write anything anymore
break drain
case logWatcher.Msg <- &logger.Message{
Line: line,
Source: source,
Timestamp: timestamp.In(time.UTC),
Attrs: attrs,
}:
shown++
}
// Call sd_journal_process() periodically during the processing loop
// to close any opened file descriptors for rotated (deleted) journal files.
if shown%1024 == 0 {
if ret := C.sd_journal_process(j); ret < 0 {
// log a warning but ignore it for now
logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
WithField("error", CErr(ret)).
Warn("journald: error processing journal")
}
}
}
// If we're at the end of the journal, we're done (for now).
if C.sd_journal_next(j) <= 0 {
break
}
}
// free(NULL) is safe
C.free(unsafe.Pointer(oldCursor))
if C.sd_journal_get_cursor(j, &cursor) != 0 {
// ensure that we won't be freeing an address that's invalid
cursor = nil
}
return cursor, done, shown
}
func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
s.mu.Lock()
s.readers[logWatcher] = struct{}{}
s.mu.Unlock()
waitTimeout := C.uint64_t(250000) // 0.25s
for {
status := C.sd_journal_wait(j, waitTimeout)
if status < 0 {
logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
goto cleanup
}
select {
case <-logWatcher.WatchConsumerGone():
goto cleanup // won't be able to write anything anymore
case <-logWatcher.WatchProducerGone():
// container is gone, drain journal
default:
// container is still alive
if status == C.SD_JOURNAL_NOP {
// no new data -- keep waiting
continue
}
}
newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
cursor = newCursor
if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
break
}
}
cleanup:
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
return cursor
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
var (
j *C.sd_journal
cmatch, cursor *C.char
stamp C.uint64_t
sinceUnixMicro uint64
untilUnixMicro uint64
)
// Get a handle to the journal.
if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
close(logWatcher.Msg)
return
}
if config.Follow {
// Initialize library inotify watches early
if rc := C.sd_journal_get_fd(j); rc < 0 {
logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
close(logWatcher.Msg)
return
}
}
// If we end up following the log, we can set the journal context
// pointer and the channel pointer to nil so that we won't close them
// here, potentially while the goroutine that uses them is still
// running. Otherwise, close them when we return from this function.
following := false
defer func() {
if !following {
close(logWatcher.Msg)
}
C.sd_journal_close(j)
}()
// Remove limits on the size of data items that we'll retrieve.
if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
return
}
// Add a match to have the library do the searching for us.
cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
defer C.free(unsafe.Pointer(cmatch))
if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
return
}
// If we have a cutoff time, convert it to Unix time once.
if !config.Since.IsZero() {
nano := config.Since.UnixNano()
sinceUnixMicro = uint64(nano / 1000)
}
// If we have an until value, convert it too
if !config.Until.IsZero() {
nano := config.Until.UnixNano()
untilUnixMicro = uint64(nano / 1000)
}
if config.Tail >= 0 {
// If until time provided, start from there.
// Otherwise start at the end of the journal.
if untilUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
return
}
} else if rc := C.sd_journal_seek_tail(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
return
}
// (Try to) skip backwards by the requested number of lines...
if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 {
// ...but not before "since"
if sinceUnixMicro != 0 &&
C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
uint64(stamp) < sinceUnixMicro {
C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
}
}
} else {
// Start at the beginning of the journal.
if rc := C.sd_journal_seek_head(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
return
}
// If we have a cutoff date, fast-forward to it.
if sinceUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
return
}
}
if rc := C.sd_journal_next(j); rc < 0 {
logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
return
}
}
if config.Tail != 0 { // special case for --tail 0
cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
}
if config.Follow {
cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
// Let followJournal handle freeing the journal context
// object and closing the channel.
following = true
}
C.free(unsafe.Pointer(cursor))
return
}
func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
logWatcher := logger.NewLogWatcher()
go s.readLogs(logWatcher, config)
return logWatcher
}
| // +build linux,cgo,!static_build,journald
package journald // import "github.com/docker/docker/daemon/logger/journald"
// #include <sys/types.h>
// #include <sys/poll.h>
// #include <systemd/sd-journal.h>
// #include <errno.h>
// #include <stdio.h>
// #include <stdlib.h>
// #include <string.h>
// #include <time.h>
// #include <unistd.h>
//
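// // get_message returns a pointer to the current entry's MESSAGE payload and
// // whether CONTAINER_PARTIAL_MESSAGE=true is set. sd_journal_get_data()
// // hands the field back as "MESSAGE=<payload>", so the 8-byte prefix is
// // skipped before returning it to Go.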
//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial)
//{
// int rc;
// size_t plength;
// *msg = NULL;
// *length = 0;
// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true");
// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length);
// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0));
// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length);
// if (rc == 0) {
// if (*length > 8) {
// (*msg) += 8;
// *length -= 8;
// } else {
// *msg = NULL;
// *length = 0;
// rc = -ENOENT;
// }
// }
// return rc;
//}
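// // get_priority parses the decimal PRIORITY field of the current entry;
// // *priority is left at -1 when the field is absent.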
//static int get_priority(sd_journal *j, int *priority)
//{
// const void *data;
// size_t i, length;
// int rc;
// *priority = -1;
// rc = sd_journal_get_data(j, "PRIORITY", &data, &length);
// if (rc == 0) {
// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) {
// *priority = 0;
// for (i = 9; i < length; i++) {
// *priority = *priority * 10 + ((const char *)data)[i] - '0';
// }
// if (length > 9) {
// rc = 0;
// }
// }
// }
// return rc;
//}
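// // is_attribute_field returns 0 when the "KEY=value" data is a user-supplied
// // attribute, and -1 for trusted fields (leading '_') or the well-known
// // journald/docker fields listed below.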
//static int is_attribute_field(const char *msg, size_t length)
//{
// static const struct known_field {
// const char *name;
// size_t length;
// } fields[] = {
// {"MESSAGE", sizeof("MESSAGE") - 1},
// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1},
// {"PRIORITY", sizeof("PRIORITY") - 1},
// {"CODE_FILE", sizeof("CODE_FILE") - 1},
// {"CODE_LINE", sizeof("CODE_LINE") - 1},
// {"CODE_FUNC", sizeof("CODE_FUNC") - 1},
// {"ERRNO", sizeof("ERRNO") - 1},
// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1},
// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1},
// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1},
// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1},
// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1},
// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1},
// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1},
// };
// unsigned int i;
// void *p;
// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) {
// return -1;
// }
// length = ((const char *) p) - msg;
// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) {
// return -1;
// }
// }
// return 0;
//}
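// // get_attribute_field walks the remaining fields of the current entry with
// // sd_journal_enumerate_data() and stops at the first user attribute.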
//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length)
//{
// int rc;
// *msg = NULL;
// *length = 0;
// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) {
// if (is_attribute_field(*msg, *length) == 0) {
// break;
// }
// rc = -ENOENT;
// }
// return rc;
//}
import "C"
import (
"errors"
"strings"
"time"
"unsafe"
"github.com/coreos/go-systemd/v22/journal"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/daemon/logger"
"github.com/sirupsen/logrus"
)
func (s *journald) Close() error {
s.mu.Lock()
for r := range s.readers {
r.ProducerGone()
delete(s.readers, r)
}
s.mu.Unlock()
return nil
}
// CErr converts an error code returned from an sd_journal_* function
// (which returns -errno) to a string.
func CErr(ret C.int) string {
return C.GoString(C.strerror(-ret))
}
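// drainJournal sends available journal entries to the log watcher, starting
// at the current position, until there are no new entries, the until bound
// is passed, or the consumer goes away. It returns the cursor of the last
// position read, whether the caller should stop reading, and the number of
// messages sent.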
func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool, int) {
var (
msg, data, cursor *C.char
length C.size_t
stamp C.uint64_t
priority, partial C.int
done bool
shown int
)
// Walk the journal from here forward until we run out of new entries
// or we reach the until value (if provided).
drain:
for {
// Try not to send a given entry twice.
if oldCursor != nil {
for C.sd_journal_test_cursor(j, oldCursor) > 0 {
if C.sd_journal_next(j) <= 0 {
break drain
}
}
}
// Read and send the logged message, if there is one to read.
i := C.get_message(j, &msg, &length, &partial)
if i != -C.ENOENT && i != -C.EADDRNOTAVAIL {
// Read the entry's timestamp.
if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
break
}
// Break if the timestamp exceeds any provided until flag.
if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) {
done = true
break
}
// Set up the time and text of the entry.
timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
line := C.GoBytes(unsafe.Pointer(msg), C.int(length))
if partial == 0 {
line = append(line, "\n"...)
}
// Recover the stream name by mapping
// from the journal priority back to
// the stream that we would have
// assigned that value.
source := ""
if C.get_priority(j, &priority) == 0 {
if priority == C.int(journal.PriErr) {
source = "stderr"
} else if priority == C.int(journal.PriInfo) {
source = "stdout"
}
}
// Retrieve the values of any variables we're adding to the journal.
var attrs []backend.LogAttr
C.sd_journal_restart_data(j)
for C.get_attribute_field(j, &data, &length) > C.int(0) {
kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2)
attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]})
}
// Send the log message, unless the consumer is gone
select {
case <-logWatcher.WatchConsumerGone():
done = true // we won't be able to write anything anymore
break drain
case logWatcher.Msg <- &logger.Message{
Line: line,
Source: source,
Timestamp: timestamp.In(time.UTC),
Attrs: attrs,
}:
shown++
}
// Call sd_journal_process() periodically during the processing loop
// to close any opened file descriptors for rotated (deleted) journal files.
if shown%1024 == 0 {
if ret := C.sd_journal_process(j); ret < 0 {
// log a warning but ignore it for now
logrus.WithField("container", s.vars["CONTAINER_ID_FULL"]).
WithField("error", CErr(ret)).
Warn("journald: error processing journal")
}
}
}
// If we're at the end of the journal, we're done (for now).
if C.sd_journal_next(j) <= 0 {
break
}
}
// free(NULL) is safe
C.free(unsafe.Pointer(oldCursor))
if C.sd_journal_get_cursor(j, &cursor) != 0 {
// ensure that we won't be freeing an address that's invalid
cursor = nil
}
return cursor, done, shown
}
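// followJournal tails the journal for the given watcher until the consumer
// goes away, the until bound is reached, or the container is gone and the
// journal is drained. It closes logWatcher.Msg and returns the final cursor,
// which the caller must free.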
func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, cursor *C.char, untilUnixMicro uint64) *C.char {
s.mu.Lock()
s.readers[logWatcher] = struct{}{}
s.mu.Unlock()
waitTimeout := C.uint64_t(250000) // 0.25s
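// Poll the journal with a short timeout so that a closed consumer or
// producer channel is noticed promptly. Once the container (producer) is
// gone, keep draining until a wait reports no new data and nothing was read.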
for {
status := C.sd_journal_wait(j, waitTimeout)
if status < 0 {
logWatcher.Err <- errors.New("error waiting for journal: " + CErr(status))
goto cleanup
}
select {
case <-logWatcher.WatchConsumerGone():
goto cleanup // won't be able to write anything anymore
case <-logWatcher.WatchProducerGone():
// container is gone, drain journal
default:
// container is still alive
if status == C.SD_JOURNAL_NOP {
// no new data -- keep waiting
continue
}
}
newCursor, done, recv := s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
cursor = newCursor
if done || (status == C.SD_JOURNAL_NOP && recv == 0) {
break
}
}
cleanup:
s.mu.Lock()
delete(s.readers, logWatcher)
s.mu.Unlock()
close(logWatcher.Msg)
return cursor
}
func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
var (
j *C.sd_journal
cmatch, cursor *C.char
stamp C.uint64_t
sinceUnixMicro uint64
untilUnixMicro uint64
)
// Get a handle to the journal.
if rc := C.sd_journal_open(&j, C.int(0)); rc != 0 {
logWatcher.Err <- errors.New("error opening journal: " + CErr(rc))
close(logWatcher.Msg)
return
}
if config.Follow {
// Initialize library inotify watches early
if rc := C.sd_journal_get_fd(j); rc < 0 {
logWatcher.Err <- errors.New("error getting journald fd: " + CErr(rc))
close(logWatcher.Msg)
return
}
}
// If we end up following the log, followJournal closes the message
// channel when it is done; otherwise close it when we return from
// this function. The journal context is always closed here.
following := false
defer func() {
if !following {
close(logWatcher.Msg)
}
C.sd_journal_close(j)
}()
// Remove limits on the size of data items that we'll retrieve.
if rc := C.sd_journal_set_data_threshold(j, C.size_t(0)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal data threshold: " + CErr(rc))
return
}
// Add a match to have the library do the searching for us.
cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"])
defer C.free(unsafe.Pointer(cmatch))
if rc := C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)); rc != 0 {
logWatcher.Err <- errors.New("error setting journal match: " + CErr(rc))
return
}
// If we have a cutoff time, convert it to Unix time once.
if !config.Since.IsZero() {
nano := config.Since.UnixNano()
sinceUnixMicro = uint64(nano / 1000)
}
// If we have an until value, convert it too
if !config.Until.IsZero() {
nano := config.Until.UnixNano()
untilUnixMicro = uint64(nano / 1000)
}
if config.Tail >= 0 {
// If an until time was provided, start from there.
// Otherwise start at the end of the journal.
if untilUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking provided until value: " + CErr(rc))
return
}
} else if rc := C.sd_journal_seek_tail(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to end of journal: " + CErr(rc))
return
}
// (Try to) skip backwards by the requested number of lines...
if C.sd_journal_previous_skip(j, C.uint64_t(config.Tail)) >= 0 {
// ...but not before "since"
if sinceUnixMicro != 0 &&
C.sd_journal_get_realtime_usec(j, &stamp) == 0 &&
uint64(stamp) < sinceUnixMicro {
C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro))
}
}
} else {
// Start at the beginning of the journal.
if rc := C.sd_journal_seek_head(j); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start of journal: " + CErr(rc))
return
}
// If we have a cutoff date, fast-forward to it.
if sinceUnixMicro != 0 {
if rc := C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)); rc != 0 {
logWatcher.Err <- errors.New("error seeking to start time in journal: " + CErr(rc))
return
}
}
if rc := C.sd_journal_next(j); rc < 0 {
logWatcher.Err <- errors.New("error skipping to next journal entry: " + CErr(rc))
return
}
}
if config.Tail != 0 { // special case for --tail 0
cursor, _, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
}
if config.Follow {
cursor = s.followJournal(logWatcher, j, cursor, untilUnixMicro)
// Let followJournal handle closing the message channel;
// the deferred cleanup above still closes the journal context.
following = true
}
C.free(unsafe.Pointer(cursor))
}
func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
logWatcher := logger.NewLogWatcher()
go s.readLogs(logWatcher, config)
return logWatcher
}
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | Hm... good call. I hate CGO, and probably wasn't sure if it was safe to remove the casting; let me update 👍 | thaJeztah | 4,749 |
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | daemon/network.go | package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
internalnetwork "github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
lncluster "github.com/docker/docker/libnetwork/cluster"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/networkdb"
"github.com/docker/docker/libnetwork/options"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PredefinedNetworkError is returned when a user tries to create a predefined network that already exists.
type PredefinedNetworkError string
func (pnr PredefinedNetworkError) Error() string {
return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr))
}
// Forbidden denotes the type of this error
func (pnr PredefinedNetworkError) Forbidden() {}
// NetworkControllerEnabled checks if the networking stack is enabled.
// This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool {
return daemon.netController != nil
}
// NetworkController returns the network controller created by the daemon.
func (daemon *Daemon) NetworkController() libnetwork.NetworkController {
return daemon.netController
}
// FindNetwork returns a network based on:
// 1. Full ID
// 2. Full Name
// 3. Partial ID
// as long as there is no ambiguity
func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) {
listByFullName := []libnetwork.Network{}
listByPartialID := []libnetwork.Network{}
for _, nw := range daemon.getAllNetworks() {
if nw.ID() == term {
return nw, nil
}
if nw.Name() == term {
listByFullName = append(listByFullName, nw)
}
if strings.HasPrefix(nw.ID(), term) {
listByPartialID = append(listByPartialID, nw)
}
}
switch {
case len(listByFullName) == 1:
return listByFullName[0], nil
case len(listByFullName) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName)))
case len(listByPartialID) == 1:
return listByPartialID[0], nil
case len(listByPartialID) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
// Be very careful about changing the error type here: the
// libnetwork.ErrNoSuchNetwork error is used by the controller
// to retry the creation of the network as managed through the swarm manager
return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
// GetNetworkByID function returns a network whose ID matches the given ID.
// It fails with an error if no matching network is found.
func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, errors.Wrap(libnetwork.ErrNoSuchNetwork(id), "netcontroller is nil")
}
return c.NetworkByID(id)
}
// GetNetworkByName function returns a network for a given network name.
// If no network name is given, the default network is returned.
func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, libnetwork.ErrNoSuchNetwork(name)
}
if name == "" {
name = c.Config().Daemon.DefaultNetwork
}
return c.NetworkByName(name)
}
// GetNetworksByIDPrefix returns the list of networks whose IDs start with the given prefix; it may be empty.
func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
list := []libnetwork.Network{}
l := func(nw libnetwork.Network) bool {
if strings.HasPrefix(nw.ID(), partialID) {
list = append(list, nw)
}
return false
}
c.WalkNetworks(l)
return list
}
// getAllNetworks returns a list containing all networks
func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
return c.Networks()
}
type ingressJob struct {
create *clustertypes.NetworkCreateRequest
ip net.IP
jobDone chan struct{}
}
var (
ingressWorkerOnce sync.Once
ingressJobsChannel chan *ingressJob
ingressID string
)
func (daemon *Daemon) startIngressWorker() {
ingressJobsChannel = make(chan *ingressJob, 100)
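// A single long-lived worker goroutine serializes ingress setup and
// release jobs so they cannot interleave.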
go func() {
// nolint: gosimple
for {
select {
case r := <-ingressJobsChannel:
if r.create != nil {
daemon.setupIngress(r.create, r.ip, ingressID)
ingressID = r.create.ID
} else {
daemon.releaseIngress(ingressID)
ingressID = ""
}
close(r.jobDone)
}
}
}()
}
// enqueueIngressJob adds an ingress add/rm request to the worker queue.
// It guarantees the worker is started.
func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
ingressWorkerOnce.Do(daemon.startIngressWorker)
ingressJobsChannel <- job
}
// SetupIngress sets up ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
ip, _, err := net.ParseCIDR(nodeIP)
if err != nil {
return nil, err
}
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
return done, nil
}
// ReleaseIngress releases the ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
return done, nil
}
func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
controller := daemon.netController
controller.AgentInitWait()
if staleID != "" && staleID != create.ID {
daemon.releaseIngress(staleID)
}
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
// If it is any error other than an already-exists
// error, log the error and return.
if _, ok := err.(libnetwork.NetworkNameError); !ok {
logrus.Errorf("Failed creating ingress network: %v", err)
return
}
// Otherwise continue down the call to create or recreate sandbox.
}
_, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
if id == "" {
return
}
n, err := controller.NetworkByID(id)
if err != nil {
logrus.Errorf("failed to retrieve ingress network %s: %v", id, err)
return
}
if err := n.Delete(libnetwork.NetworkDeleteOptionRemoveLB); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
}
}
// SetNetworkBootstrapKeys sets the bootstrap keys.
func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
err := daemon.netController.SetKeys(keys)
if err == nil {
// Upon successful key setting dispatch the keys available event
daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable)
}
return err
}
// UpdateAttachment notifies the attacher about the attachment config.
func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil {
return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config)
}
return nil
}
// WaitForDetachment makes the cluster manager wait for detachment of
// the container from the network.
func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID)
}
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
resp, err := daemon.createNetwork(create, "", false)
if err != nil {
return nil, err
}
return resp, err
}
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) {
return nil, PredefinedNetworkError(create.Name)
}
var warning string
nw, err := daemon.GetNetworkByName(create.Name)
if err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
return nil, err
}
}
if nw != nil {
// check if the user set CheckDuplicate; if so, return an error,
// otherwise prepare a warning message
if create.CheckDuplicate {
if !agent || nw.Info().Dynamic() {
return nil, libnetwork.NetworkNameError(create.Name)
}
}
warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID())
}
c := daemon.netController
driver := create.Driver
if driver == "" {
driver = c.Config().Daemon.DefaultDriver
}
nwOptions := []libnetwork.NetworkOption{
libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(create.Options),
libnetwork.NetworkOptionLabels(create.Labels),
libnetwork.NetworkOptionAttachable(create.Attachable),
libnetwork.NetworkOptionIngress(create.Ingress),
libnetwork.NetworkOptionScope(create.Scope),
}
if create.ConfigOnly {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
}
if create.IPAM != nil {
ipam := create.IPAM
v4Conf, v6Conf, err := getIpamConfig(ipam.Config)
if err != nil {
return nil, err
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
}
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
if agent {
nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
}
if create.ConfigFrom != nil {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
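// Swarm-scoped overlay networks get a per-node load-balancer endpoint;
// the IP for it is looked up in the attachment store.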
if agent && driver == "overlay" {
nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
if !exists {
return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
}
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
// nolint: golint
return nil, errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
}
return nil, err
}
daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire)
if create.IPAM != nil {
daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire)
}
daemon.LogNetworkEvent(n, "create")
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
}, nil
}
func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) {
var builtinDrivers []string
if capability == driverapi.NetworkPluginEndpointType {
builtinDrivers = daemon.netController.BuiltinDrivers()
} else if capability == ipamapi.PluginEndpointType {
builtinDrivers = daemon.netController.BuiltinIPAMDrivers()
}
for _, d := range builtinDrivers {
if d == driver {
return
}
}
if daemon.PluginStore != nil {
_, err := daemon.PluginStore.Get(driver, capability, mode)
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation")
}
}
}
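// getIpamConfig splits the requested IPAM configuration into IPv4 and IPv6
// lists, based on whether each entry's subnet parses as an IPv4 address.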
func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) {
ipamV4Cfg := []*libnetwork.IpamConf{}
ipamV6Cfg := []*libnetwork.IpamConf{}
for _, d := range data {
iCfg := libnetwork.IpamConf{}
iCfg.PreferredPool = d.Subnet
iCfg.SubPool = d.IPRange
iCfg.Gateway = d.Gateway
iCfg.AuxAddresses = d.AuxAddress
ip, _, err := net.ParseCIDR(d.Subnet)
if err != nil {
return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err)
}
if ip.To4() != nil {
ipamV4Cfg = append(ipamV4Cfg, &iCfg)
} else {
ipamV6Cfg = append(ipamV6Cfg, &iCfg)
}
}
return ipamV4Cfg, ipamV6Cfg, nil
}
// UpdateContainerServiceConfig updates a service configuration.
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
ctr.NetworkSettings.Service = serviceConfig
return nil
}
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
}
// DisconnectContainerFromNetwork disconnects the given container from
// the given network. If either cannot be found, an err is returned.
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
if force {
return daemon.ForceEndpointDelete(containerName, networkName)
}
return err
}
return daemon.DisconnectFromNetwork(ctr, networkName, force)
}
// GetNetworkDriverList returns the list of plugin drivers
// registered for network.
func (daemon *Daemon) GetNetworkDriverList() []string {
if !daemon.NetworkControllerEnabled() {
return nil
}
pluginList := daemon.netController.BuiltinDrivers()
managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType)
for _, plugin := range managedPlugins {
pluginList = append(pluginList, plugin.Name())
}
pluginMap := make(map[string]bool)
for _, plugin := range pluginList {
pluginMap[plugin] = true
}
networks := daemon.netController.Networks()
for _, nw := range networks {
if !pluginMap[nw.Type()] {
pluginList = append(pluginList, nw.Type())
pluginMap[nw.Type()] = true
}
}
sort.Strings(pluginList)
return pluginList
}
// DeleteManagedNetwork deletes an agent network.
// The requirement of networkID is enforced.
func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return err
}
return daemon.deleteNetwork(n, true)
}
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return errors.Wrap(err, "could not find network by ID")
}
return daemon.deleteNetwork(n, false)
}
func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error {
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errdefs.Forbidden(err)
}
if dynamic && !nw.Info().Dynamic() {
if runconfig.IsPreDefinedNetwork(nw.Name()) {
// Predefined networks now support swarm services. Make this
// a no-op when cluster requests to remove the predefined network.
return nil
}
err := fmt.Errorf("%s is not a dynamic network", nw.Name())
return errdefs.Forbidden(err)
}
if err := nw.Delete(); err != nil {
return errors.Wrap(err, "error while removing network")
}
// If this is not a configuration only network, we need to
// update the corresponding remote drivers' reference counts
if !nw.Info().ConfigOnly() {
daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release)
ipamType, _, _, _ := nw.Info().IpamConfig()
daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release)
daemon.LogNetworkEvent(nw, "destroy")
}
return nil
}
// GetNetworks returns a list of all networks
func (daemon *Daemon) GetNetworks(filter filters.Args, config types.NetworkListConfig) ([]types.NetworkResource, error) {
networks := daemon.getAllNetworks()
list := make([]types.NetworkResource, 0, len(networks))
var idx map[string]libnetwork.Network
if config.Detailed {
idx = make(map[string]libnetwork.Network)
}
for _, n := range networks {
nr := buildNetworkResource(n)
list = append(list, nr)
if config.Detailed {
idx[nr.ID] = n
}
}
var err error
list, err = internalnetwork.FilterNetworks(list, filter)
if err != nil {
return nil, err
}
if config.Detailed {
for i, n := range list {
np := &n
buildDetailedNetworkResources(np, idx[n.ID], config.Verbose)
list[i] = *np
}
}
return list, nil
}
func buildNetworkResource(nw libnetwork.Network) types.NetworkResource {
r := types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Ingress = info.Ingress()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(&r, info)
r.Labels = info.Labels()
r.ConfigOnly = info.ConfigOnly()
if cn := info.ConfigFrom(); cn != "" {
r.ConfigFrom = network.ConfigReference{Network: cn}
}
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func buildDetailedNetworkResources(r *types.NetworkResource, nw libnetwork.Network, verbose bool) {
if nw == nil {
return
}
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
if ip4Info.IPAMData.Gateway != nil {
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
}
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
// clearAttachableNetworks removes the attachable networks
// after disconnecting any connected container
func (daemon *Daemon) clearAttachableNetworks() {
for _, n := range daemon.getAllNetworks() {
if !n.Info().Attachable() {
continue
}
for _, ep := range n.Endpoints() {
epInfo := ep.Info()
if epInfo == nil {
continue
}
sb := epInfo.Sandbox()
if sb == nil {
continue
}
containerID := sb.ContainerID()
if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil {
logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v",
containerID, n.Name(), err)
}
}
if err := daemon.DeleteManagedNetwork(n.ID()); err != nil {
logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err)
}
}
}
// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
var (
bindings = make(nat.PortMap)
pbList []networktypes.PortBinding
exposeList []networktypes.TransportPort
createOptions []libnetwork.EndpointOption
)
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
if (!serviceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
c.NetworkSettings.IsAnonymousEndpoint {
createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
}
if epConfig != nil {
ipam := epConfig.IPAMConfig
if ipam != nil {
var (
ipList []net.IP
ip, ip6, linkip net.IP
)
for _, ips := range ipam.LinkLocalIPs {
if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs)
}
ipList = append(ipList, linkip)
}
if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address)
}
if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address)
}
createOptions = append(createOptions,
libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
}
for _, alias := range epConfig.Aliases {
createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
}
for k, v := range epConfig.DriverOpts {
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
if c.NetworkSettings.Service != nil {
svcCfg := c.NetworkSettings.Service
var vip string
if svcCfg.VirtualAddresses[n.ID()] != nil {
vip = svcCfg.VirtualAddresses[n.ID()].IPv4
}
var portConfigs []*libnetwork.PortConfig
for _, portConfig := range svcCfg.ExposedPorts {
portConfigs = append(portConfigs, &libnetwork.PortConfig{
Name: portConfig.Name,
Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
}
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}
// configs that are applicable only for the endpoint in the network
// to which the container was connected on docker run.
// Ideally all these network-specific endpoint configurations must be moved under
// container.NetworkSettings.Networks[n.Name()]
if n.Name() == c.HostConfig.NetworkMode.NetworkName() ||
(n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) {
if c.Config.MacAddress != "" {
mac, err := net.ParseMAC(c.Config.MacAddress)
if err != nil {
return nil, err
}
genericOption := options.Generic{
netlabel.MacAddress: mac,
}
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
}
}
// Port-mapping rules belong to the container & applicable only to non-internal networks
portmaps := getSandboxPortMapInfo(sb)
if n.Info().Internal() || len(portmaps) > 0 {
return createOptions, nil
}
if c.HostConfig.PortBindings != nil {
for p, b := range c.HostConfig.PortBindings {
bindings[p] = []nat.PortBinding{}
for _, bb := range b {
bindings[p] = append(bindings[p], nat.PortBinding{
HostIP: bb.HostIP,
HostPort: bb.HostPort,
})
}
}
}
portSpecs := c.Config.ExposedPorts
ports := make([]nat.Port, len(portSpecs))
var i int
for p := range portSpecs {
ports[i] = p
i++
}
nat.SortPortMap(ports, bindings)
for _, port := range ports {
expose := networktypes.TransportPort{}
expose.Proto = networktypes.ParseProtocol(port.Proto())
expose.Port = uint16(port.Int())
exposeList = append(exposeList, expose)
pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto}
binding := bindings[port]
for i := 0; i < len(binding); i++ {
pbCopy := pb.GetCopy()
newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
var portStart, portEnd int
if err == nil {
portStart, portEnd, err = newP.Range()
}
if err != nil {
return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort)
}
pbCopy.HostPort = uint16(portStart)
pbCopy.HostPortEnd = uint16(portEnd)
pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
pbList = append(pbList, pbCopy)
}
if c.HostConfig.PublishAllPorts && len(binding) == 0 {
pbList = append(pbList, pb)
}
}
var dns []string
if len(c.HostConfig.DNS) > 0 {
dns = c.HostConfig.DNS
} else if len(daemonDNS) > 0 {
dns = daemonDNS
}
if len(dns) > 0 {
createOptions = append(createOptions,
libnetwork.CreateOptionDNS(dns))
}
createOptions = append(createOptions,
libnetwork.CreateOptionPortMapping(pbList),
libnetwork.CreateOptionExposedPorts(exposeList))
return createOptions, nil
}
// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
pm := nat.PortMap{}
if sb == nil {
return pm
}
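// Use the first endpoint in the sandbox that reports any port mappings.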
for _, ep := range sb.Endpoints() {
pm, _ = getEndpointPortMapInfo(ep)
if len(pm) > 0 {
break
}
}
return pm
}
func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) {
pm := nat.PortMap{}
driverInfo, err := ep.DriverInfo()
if err != nil {
return pm, err
}
if driverInfo == nil {
// It is not an error for driverInfo to be nil
return pm, nil
}
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
if exposedPorts, ok := expData.([]networktypes.TransportPort); ok {
for _, tp := range exposedPorts {
natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
if err != nil {
return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err)
}
pm[natPort] = nil
}
}
}
mapData, ok := driverInfo[netlabel.PortMap]
if !ok {
return pm, nil
}
if portMapping, ok := mapData.([]networktypes.PortBinding); ok {
for _, pp := range portMapping {
natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
if err != nil {
return pm, err
}
natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
pm[natPort] = append(pm[natPort], natBndg)
}
}
return pm, nil
}
// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error {
if ep == nil {
return errors.New("endpoint cannot be nil")
}
if networkSettings == nil {
return errors.New("network cannot be nil")
}
epInfo := ep.Info()
if epInfo == nil {
// It is not an error to get an empty endpoint info
return nil
}
if _, ok := networkSettings.Networks[n.Name()]; !ok {
networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{
EndpointSettings: &network.EndpointSettings{},
}
}
networkSettings.Networks[n.Name()].NetworkID = n.ID()
networkSettings.Networks[n.Name()].EndpointID = ep.ID()
iface := epInfo.Iface()
if iface == nil {
return nil
}
if iface.MacAddress() != nil {
networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
}
if iface.Address() != nil {
ones, _ := iface.Address().Mask.Size()
networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
networkSettings.Networks[n.Name()].IPPrefixLen = ones
}
if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
onesv6, _ := iface.AddressIPv6().Mask.Size()
networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
}
return nil
}
// buildJoinOptions builds endpoint Join options from a given network.
func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface {
Name() string
}) ([]libnetwork.EndpointOption, error) {
var joinOptions []libnetwork.EndpointOption
if epConfig, ok := networkSettings.Networks[n.Name()]; ok {
for _, str := range epConfig.Links {
name, alias, err := opts.ParseLink(str)
if err != nil {
return nil, err
}
joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
}
for k, v := range epConfig.DriverOpts {
joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
return joinOptions, nil
}
| package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
internalnetwork "github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
lncluster "github.com/docker/docker/libnetwork/cluster"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/networkdb"
"github.com/docker/docker/libnetwork/options"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PredefinedNetworkError is returned when a user tries to create a predefined network that already exists.
type PredefinedNetworkError string
func (pnr PredefinedNetworkError) Error() string {
return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr))
}
// Forbidden denotes the type of this error
func (pnr PredefinedNetworkError) Forbidden() {}
// NetworkControllerEnabled checks if the networking stack is enabled.
// This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool {
return daemon.netController != nil
}
// NetworkController returns the network controller created by the daemon.
func (daemon *Daemon) NetworkController() libnetwork.NetworkController {
return daemon.netController
}
// FindNetwork returns a network based on:
// 1. Full ID
// 2. Full Name
// 3. Partial ID
// as long as there is no ambiguity
func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) {
listByFullName := []libnetwork.Network{}
listByPartialID := []libnetwork.Network{}
for _, nw := range daemon.getAllNetworks() {
if nw.ID() == term {
return nw, nil
}
if nw.Name() == term {
listByFullName = append(listByFullName, nw)
}
if strings.HasPrefix(nw.ID(), term) {
listByPartialID = append(listByPartialID, nw)
}
}
switch {
case len(listByFullName) == 1:
return listByFullName[0], nil
case len(listByFullName) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName)))
case len(listByPartialID) == 1:
return listByPartialID[0], nil
case len(listByPartialID) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
// Be very careful about changing the error type here: the
// libnetwork.ErrNoSuchNetwork error is used by the controller
// to retry the creation of the network as managed through the swarm manager
return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
// GetNetworkByID function returns a network whose ID matches the given ID.
// It fails with an error if no matching network is found.
func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, errors.Wrap(libnetwork.ErrNoSuchNetwork(id), "netcontroller is nil")
}
return c.NetworkByID(id)
}
// GetNetworkByName function returns a network for a given network name.
// If no network name is given, the default network is returned.
func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, libnetwork.ErrNoSuchNetwork(name)
}
if name == "" {
name = c.Config().Daemon.DefaultNetwork
}
return c.NetworkByName(name)
}
// GetNetworksByIDPrefix returns the list of networks whose IDs start with the given prefix; it may be empty.
func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
list := []libnetwork.Network{}
l := func(nw libnetwork.Network) bool {
if strings.HasPrefix(nw.ID(), partialID) {
list = append(list, nw)
}
return false
}
c.WalkNetworks(l)
return list
}
// getAllNetworks returns a list containing all networks
func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
return c.Networks()
}
type ingressJob struct {
create *clustertypes.NetworkCreateRequest
ip net.IP
jobDone chan struct{}
}
var (
ingressWorkerOnce sync.Once
ingressJobsChannel chan *ingressJob
ingressID string
)
func (daemon *Daemon) startIngressWorker() {
ingressJobsChannel = make(chan *ingressJob, 100)
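// A single long-lived worker goroutine serializes ingress setup and
// release jobs so they cannot interleave.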
go func() {
//nolint: gosimple
for {
select {
case r := <-ingressJobsChannel:
if r.create != nil {
daemon.setupIngress(r.create, r.ip, ingressID)
ingressID = r.create.ID
} else {
daemon.releaseIngress(ingressID)
ingressID = ""
}
close(r.jobDone)
}
}
}()
}
// enqueueIngressJob adds an ingress add/rm request to the worker queue.
// It guarantees the worker is started.
func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
ingressWorkerOnce.Do(daemon.startIngressWorker)
ingressJobsChannel <- job
}
// SetupIngress sets up ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
ip, _, err := net.ParseCIDR(nodeIP)
if err != nil {
return nil, err
}
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
return done, nil
}
// ReleaseIngress releases the ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
return done, nil
}
func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
controller := daemon.netController
controller.AgentInitWait()
if staleID != "" && staleID != create.ID {
daemon.releaseIngress(staleID)
}
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
// If it is any error other than an already-exists
// error, log the error and return.
if _, ok := err.(libnetwork.NetworkNameError); !ok {
logrus.Errorf("Failed creating ingress network: %v", err)
return
}
// Otherwise continue down the call to create or recreate sandbox.
}
_, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
if id == "" {
return
}
n, err := controller.NetworkByID(id)
if err != nil {
logrus.Errorf("failed to retrieve ingress network %s: %v", id, err)
return
}
if err := n.Delete(libnetwork.NetworkDeleteOptionRemoveLB); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
}
}
// SetNetworkBootstrapKeys sets the bootstrap keys.
func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
err := daemon.netController.SetKeys(keys)
if err == nil {
// Upon successful key setting dispatch the keys available event
daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable)
}
return err
}
// UpdateAttachment notifies the attacher about the attachment config.
func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil {
return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config)
}
return nil
}
// WaitForDetachment makes the cluster manager wait for detachment of
// the container from the network.
func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID)
}
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
resp, err := daemon.createNetwork(create, "", false)
if err != nil {
return nil, err
}
return resp, err
}
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) {
return nil, PredefinedNetworkError(create.Name)
}
var warning string
nw, err := daemon.GetNetworkByName(create.Name)
if err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
return nil, err
}
}
if nw != nil {
// check if the user set CheckDuplicate; if so, return an error,
// otherwise prepare a warning message
if create.CheckDuplicate {
if !agent || nw.Info().Dynamic() {
return nil, libnetwork.NetworkNameError(create.Name)
}
}
warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID())
}
c := daemon.netController
driver := create.Driver
if driver == "" {
driver = c.Config().Daemon.DefaultDriver
}
nwOptions := []libnetwork.NetworkOption{
libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(create.Options),
libnetwork.NetworkOptionLabels(create.Labels),
libnetwork.NetworkOptionAttachable(create.Attachable),
libnetwork.NetworkOptionIngress(create.Ingress),
libnetwork.NetworkOptionScope(create.Scope),
}
if create.ConfigOnly {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
}
if create.IPAM != nil {
ipam := create.IPAM
v4Conf, v6Conf, err := getIpamConfig(ipam.Config)
if err != nil {
return nil, err
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
}
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
if agent {
nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
}
if create.ConfigFrom != nil {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
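// Swarm-scoped overlay networks get a per-node load-balancer endpoint;
// the IP for it is looked up in the attachment store.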
if agent && driver == "overlay" {
nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
if !exists {
return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
}
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
//nolint: golint
return nil, errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
}
return nil, err
}
daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire)
if create.IPAM != nil {
daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire)
}
daemon.LogNetworkEvent(n, "create")
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
}, nil
}
func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) {
var builtinDrivers []string
if capability == driverapi.NetworkPluginEndpointType {
builtinDrivers = daemon.netController.BuiltinDrivers()
} else if capability == ipamapi.PluginEndpointType {
builtinDrivers = daemon.netController.BuiltinIPAMDrivers()
}
for _, d := range builtinDrivers {
if d == driver {
return
}
}
if daemon.PluginStore != nil {
_, err := daemon.PluginStore.Get(driver, capability, mode)
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation")
}
}
}
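// getIpamConfig splits the requested IPAM configuration into IPv4 and IPv6
// lists, based on whether each entry's subnet parses as an IPv4 address.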
func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) {
ipamV4Cfg := []*libnetwork.IpamConf{}
ipamV6Cfg := []*libnetwork.IpamConf{}
for _, d := range data {
iCfg := libnetwork.IpamConf{}
iCfg.PreferredPool = d.Subnet
iCfg.SubPool = d.IPRange
iCfg.Gateway = d.Gateway
iCfg.AuxAddresses = d.AuxAddress
ip, _, err := net.ParseCIDR(d.Subnet)
if err != nil {
return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err)
}
if ip.To4() != nil {
ipamV4Cfg = append(ipamV4Cfg, &iCfg)
} else {
ipamV6Cfg = append(ipamV6Cfg, &iCfg)
}
}
return ipamV4Cfg, ipamV6Cfg, nil
}
// UpdateContainerServiceConfig updates a service configuration.
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
ctr.NetworkSettings.Service = serviceConfig
return nil
}
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
}
// DisconnectContainerFromNetwork disconnects the given container from
// the given network. If either cannot be found, an err is returned.
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
if force {
return daemon.ForceEndpointDelete(containerName, networkName)
}
return err
}
return daemon.DisconnectFromNetwork(ctr, networkName, force)
}
// GetNetworkDriverList returns the list of plugin drivers
// registered for network.
func (daemon *Daemon) GetNetworkDriverList() []string {
if !daemon.NetworkControllerEnabled() {
return nil
}
pluginList := daemon.netController.BuiltinDrivers()
managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType)
for _, plugin := range managedPlugins {
pluginList = append(pluginList, plugin.Name())
}
pluginMap := make(map[string]bool)
for _, plugin := range pluginList {
pluginMap[plugin] = true
}
networks := daemon.netController.Networks()
for _, nw := range networks {
if !pluginMap[nw.Type()] {
pluginList = append(pluginList, nw.Type())
pluginMap[nw.Type()] = true
}
}
sort.Strings(pluginList)
return pluginList
}
// DeleteManagedNetwork deletes an agent network.
// The requirement of networkID is enforced.
func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return err
}
return daemon.deleteNetwork(n, true)
}
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return errors.Wrap(err, "could not find network by ID")
}
return daemon.deleteNetwork(n, false)
}
func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error {
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errdefs.Forbidden(err)
}
if dynamic && !nw.Info().Dynamic() {
if runconfig.IsPreDefinedNetwork(nw.Name()) {
// Predefined networks now support swarm services. Make this
// a no-op when cluster requests to remove the predefined network.
return nil
}
err := fmt.Errorf("%s is not a dynamic network", nw.Name())
return errdefs.Forbidden(err)
}
if err := nw.Delete(); err != nil {
return errors.Wrap(err, "error while removing network")
}
// If this is not a configuration only network, we need to
// update the corresponding remote drivers' reference counts
if !nw.Info().ConfigOnly() {
daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release)
ipamType, _, _, _ := nw.Info().IpamConfig()
daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release)
daemon.LogNetworkEvent(nw, "destroy")
}
return nil
}
// GetNetworks returns a list of all networks
func (daemon *Daemon) GetNetworks(filter filters.Args, config types.NetworkListConfig) ([]types.NetworkResource, error) {
networks := daemon.getAllNetworks()
list := make([]types.NetworkResource, 0, len(networks))
var idx map[string]libnetwork.Network
if config.Detailed {
idx = make(map[string]libnetwork.Network)
}
for _, n := range networks {
nr := buildNetworkResource(n)
list = append(list, nr)
if config.Detailed {
idx[nr.ID] = n
}
}
var err error
list, err = internalnetwork.FilterNetworks(list, filter)
if err != nil {
return nil, err
}
if config.Detailed {
for i := range list {
np := &list[i]
buildDetailedNetworkResources(np, idx[np.ID], config.Verbose)
list[i] = *np
}
}
return list, nil
}
func buildNetworkResource(nw libnetwork.Network) types.NetworkResource {
r := types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Ingress = info.Ingress()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(&r, info)
r.Labels = info.Labels()
r.ConfigOnly = info.ConfigOnly()
if cn := info.ConfigFrom(); cn != "" {
r.ConfigFrom = network.ConfigReference{Network: cn}
}
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func buildDetailedNetworkResources(r *types.NetworkResource, nw libnetwork.Network, verbose bool) {
if nw == nil {
return
}
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
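	// No user-supplied IPv4 pools were configured; fall back to the operational IPAM data.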
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
if ip4Info.IPAMData.Gateway != nil {
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
}
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
// clearAttachableNetworks removes the attachable networks
// after disconnecting any connected container
func (daemon *Daemon) clearAttachableNetworks() {
for _, n := range daemon.getAllNetworks() {
if !n.Info().Attachable() {
continue
}
for _, ep := range n.Endpoints() {
epInfo := ep.Info()
if epInfo == nil {
continue
}
sb := epInfo.Sandbox()
if sb == nil {
continue
}
containerID := sb.ContainerID()
if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil {
logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v",
containerID, n.Name(), err)
}
}
if err := daemon.DeleteManagedNetwork(n.ID()); err != nil {
logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err)
}
}
}
// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
var (
bindings = make(nat.PortMap)
pbList []networktypes.PortBinding
exposeList []networktypes.TransportPort
createOptions []libnetwork.EndpointOption
)
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
if (!serviceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
c.NetworkSettings.IsAnonymousEndpoint {
createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
}
if epConfig != nil {
ipam := epConfig.IPAMConfig
if ipam != nil {
var (
ipList []net.IP
ip, ip6, linkip net.IP
)
for _, ips := range ipam.LinkLocalIPs {
if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs)
}
ipList = append(ipList, linkip)
}
if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address)
}
if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address)
}
createOptions = append(createOptions,
libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
}
for _, alias := range epConfig.Aliases {
createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
}
for k, v := range epConfig.DriverOpts {
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
if c.NetworkSettings.Service != nil {
svcCfg := c.NetworkSettings.Service
var vip string
if svcCfg.VirtualAddresses[n.ID()] != nil {
vip = svcCfg.VirtualAddresses[n.ID()].IPv4
}
var portConfigs []*libnetwork.PortConfig
for _, portConfig := range svcCfg.ExposedPorts {
portConfigs = append(portConfigs, &libnetwork.PortConfig{
Name: portConfig.Name,
Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
}
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}
// configs that are applicable only for the endpoint in the network
	// to which the container was connected on docker run.
// Ideally all these network-specific endpoint configurations must be moved under
// container.NetworkSettings.Networks[n.Name()]
if n.Name() == c.HostConfig.NetworkMode.NetworkName() ||
(n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) {
if c.Config.MacAddress != "" {
mac, err := net.ParseMAC(c.Config.MacAddress)
if err != nil {
return nil, err
}
genericOption := options.Generic{
netlabel.MacAddress: mac,
}
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
}
}
	// Port-mapping rules belong to the container and are applicable only to non-internal networks
portmaps := getSandboxPortMapInfo(sb)
if n.Info().Internal() || len(portmaps) > 0 {
return createOptions, nil
}
if c.HostConfig.PortBindings != nil {
for p, b := range c.HostConfig.PortBindings {
bindings[p] = []nat.PortBinding{}
for _, bb := range b {
bindings[p] = append(bindings[p], nat.PortBinding{
HostIP: bb.HostIP,
HostPort: bb.HostPort,
})
}
}
}
portSpecs := c.Config.ExposedPorts
ports := make([]nat.Port, len(portSpecs))
var i int
for p := range portSpecs {
ports[i] = p
i++
}
nat.SortPortMap(ports, bindings)
for _, port := range ports {
expose := networktypes.TransportPort{}
expose.Proto = networktypes.ParseProtocol(port.Proto())
expose.Port = uint16(port.Int())
exposeList = append(exposeList, expose)
pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto}
binding := bindings[port]
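		// Expand each configured host binding (a single port or a port range) into a PortBinding.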
for i := 0; i < len(binding); i++ {
pbCopy := pb.GetCopy()
newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
var portStart, portEnd int
if err == nil {
portStart, portEnd, err = newP.Range()
}
if err != nil {
return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort)
}
pbCopy.HostPort = uint16(portStart)
pbCopy.HostPortEnd = uint16(portEnd)
pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
pbList = append(pbList, pbCopy)
}
if c.HostConfig.PublishAllPorts && len(binding) == 0 {
pbList = append(pbList, pb)
}
}
var dns []string
if len(c.HostConfig.DNS) > 0 {
dns = c.HostConfig.DNS
} else if len(daemonDNS) > 0 {
dns = daemonDNS
}
if len(dns) > 0 {
createOptions = append(createOptions,
libnetwork.CreateOptionDNS(dns))
}
createOptions = append(createOptions,
libnetwork.CreateOptionPortMapping(pbList),
libnetwork.CreateOptionExposedPorts(exposeList))
return createOptions, nil
}
// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
pm := nat.PortMap{}
if sb == nil {
return pm
}
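	// Use the mappings of the first endpoint in the sandbox that has any programmed.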
for _, ep := range sb.Endpoints() {
pm, _ = getEndpointPortMapInfo(ep)
if len(pm) > 0 {
break
}
}
return pm
}
func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) {
pm := nat.PortMap{}
driverInfo, err := ep.DriverInfo()
if err != nil {
return pm, err
}
if driverInfo == nil {
		// It is not an error for driverInfo to be nil
return pm, nil
}
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
if exposedPorts, ok := expData.([]networktypes.TransportPort); ok {
for _, tp := range exposedPorts {
natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
if err != nil {
return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err)
}
pm[natPort] = nil
}
}
}
mapData, ok := driverInfo[netlabel.PortMap]
if !ok {
return pm, nil
}
if portMapping, ok := mapData.([]networktypes.PortBinding); ok {
for _, pp := range portMapping {
natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
if err != nil {
return pm, err
}
natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
pm[natPort] = append(pm[natPort], natBndg)
}
}
return pm, nil
}
// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error {
if ep == nil {
return errors.New("endpoint cannot be nil")
}
if networkSettings == nil {
return errors.New("network cannot be nil")
}
epInfo := ep.Info()
if epInfo == nil {
// It is not an error to get an empty endpoint info
return nil
}
if _, ok := networkSettings.Networks[n.Name()]; !ok {
networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{
EndpointSettings: &network.EndpointSettings{},
}
}
networkSettings.Networks[n.Name()].NetworkID = n.ID()
networkSettings.Networks[n.Name()].EndpointID = ep.ID()
iface := epInfo.Iface()
if iface == nil {
return nil
}
if iface.MacAddress() != nil {
networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
}
if iface.Address() != nil {
ones, _ := iface.Address().Mask.Size()
networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
networkSettings.Networks[n.Name()].IPPrefixLen = ones
}
if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
onesv6, _ := iface.AddressIPv6().Mask.Size()
networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
}
return nil
}
// buildJoinOptions builds endpoint Join options from a given network.
func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface {
Name() string
}) ([]libnetwork.EndpointOption, error) {
var joinOptions []libnetwork.EndpointOption
if epConfig, ok := networkSettings.Networks[n.Name()]; ok {
for _, str := range epConfig.Links {
name, alias, err := opts.ParseLink(str)
if err != nil {
return nil, err
}
joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
}
for k, v := range epConfig.DriverOpts {
joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
return joinOptions, nil
}
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | Is it worth switching `n.ID` here to `np.ID` and ditching `n` completely? | tianon | 4,750 |
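The review comment above refers to the detailed-listing loop in GetNetworks. A minimal sketch of the suggested form, indexing the slice directly and reading the ID from np so the loop variable n can be dropped (matching the loop already present in the updated content above), would be:

	if config.Detailed {
		for i := range list {
			np := &list[i]
			buildDetailedNetworkResources(np, idx[np.ID], config.Verbose)
			list[i] = *np
		}
	}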
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | daemon/network.go | package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
internalnetwork "github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
lncluster "github.com/docker/docker/libnetwork/cluster"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/networkdb"
"github.com/docker/docker/libnetwork/options"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PredefinedNetworkError is returned when a user tries to create a predefined network that already exists.
type PredefinedNetworkError string
func (pnr PredefinedNetworkError) Error() string {
return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr))
}
// Forbidden denotes the type of this error
func (pnr PredefinedNetworkError) Forbidden() {}
// NetworkControllerEnabled checks if the networking stack is enabled.
// This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool {
return daemon.netController != nil
}
// NetworkController returns the network controller created by the daemon.
func (daemon *Daemon) NetworkController() libnetwork.NetworkController {
return daemon.netController
}
// FindNetwork returns a network based on:
// 1. Full ID
// 2. Full Name
// 3. Partial ID
// as long as there is no ambiguity
func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) {
listByFullName := []libnetwork.Network{}
listByPartialID := []libnetwork.Network{}
for _, nw := range daemon.getAllNetworks() {
if nw.ID() == term {
return nw, nil
}
if nw.Name() == term {
listByFullName = append(listByFullName, nw)
}
if strings.HasPrefix(nw.ID(), term) {
listByPartialID = append(listByPartialID, nw)
}
}
switch {
case len(listByFullName) == 1:
return listByFullName[0], nil
case len(listByFullName) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName)))
case len(listByPartialID) == 1:
return listByPartialID[0], nil
case len(listByPartialID) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
	// Be very careful about changing the error type here; the
// libnetwork.ErrNoSuchNetwork error is used by the controller
// to retry the creation of the network as managed through the swarm manager
return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
// GetNetworkByID function returns a network whose ID matches the given ID.
// It fails with an error if no matching network is found.
func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, errors.Wrap(libnetwork.ErrNoSuchNetwork(id), "netcontroller is nil")
}
return c.NetworkByID(id)
}
// GetNetworkByName function returns a network for a given network name.
// If no network name is given, the default network is returned.
func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, libnetwork.ErrNoSuchNetwork(name)
}
if name == "" {
name = c.Config().Daemon.DefaultNetwork
}
return c.NetworkByName(name)
}
// GetNetworksByIDPrefix returns the list of networks whose ID starts with the given partial ID; it may match zero or more networks
func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
list := []libnetwork.Network{}
l := func(nw libnetwork.Network) bool {
if strings.HasPrefix(nw.ID(), partialID) {
list = append(list, nw)
}
return false
}
c.WalkNetworks(l)
return list
}
// getAllNetworks returns a list containing all networks
func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
return c.Networks()
}
type ingressJob struct {
create *clustertypes.NetworkCreateRequest
ip net.IP
jobDone chan struct{}
}
var (
ingressWorkerOnce sync.Once
ingressJobsChannel chan *ingressJob
ingressID string
)
func (daemon *Daemon) startIngressWorker() {
ingressJobsChannel = make(chan *ingressJob, 100)
go func() {
// nolint: gosimple
for {
select {
case r := <-ingressJobsChannel:
if r.create != nil {
daemon.setupIngress(r.create, r.ip, ingressID)
ingressID = r.create.ID
} else {
daemon.releaseIngress(ingressID)
ingressID = ""
}
close(r.jobDone)
}
}
}()
}
// enqueueIngressJob adds an ingress add/rm request to the worker queue.
// It guarantees the worker is started.
func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
ingressWorkerOnce.Do(daemon.startIngressWorker)
ingressJobsChannel <- job
}
// SetupIngress sets up ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
ip, _, err := net.ParseCIDR(nodeIP)
if err != nil {
return nil, err
}
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
return done, nil
}
// ReleaseIngress releases the ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
return done, nil
}
func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
controller := daemon.netController
controller.AgentInitWait()
if staleID != "" && staleID != create.ID {
daemon.releaseIngress(staleID)
}
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
		// If it is any error other than an "already exists" error, log it and return.
if _, ok := err.(libnetwork.NetworkNameError); !ok {
logrus.Errorf("Failed creating ingress network: %v", err)
return
}
// Otherwise continue down the call to create or recreate sandbox.
}
_, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
if id == "" {
return
}
n, err := controller.NetworkByID(id)
if err != nil {
logrus.Errorf("failed to retrieve ingress network %s: %v", id, err)
return
}
if err := n.Delete(libnetwork.NetworkDeleteOptionRemoveLB); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
}
}
// SetNetworkBootstrapKeys sets the bootstrap keys.
func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
err := daemon.netController.SetKeys(keys)
if err == nil {
		// Upon successful key setting, dispatch the keys-available event
daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable)
}
return err
}
// UpdateAttachment notifies the attacher about the attachment config.
func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil {
return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config)
}
return nil
}
// WaitForDetachment makes the cluster manager wait for detachment of
// the container from the network.
func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID)
}
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
resp, err := daemon.createNetwork(create, "", false)
if err != nil {
return nil, err
}
return resp, err
}
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) {
return nil, PredefinedNetworkError(create.Name)
}
var warning string
nw, err := daemon.GetNetworkByName(create.Name)
if err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
return nil, err
}
}
if nw != nil {
		// check if the user defined CheckDuplicate; if set to true, return an error,
		// otherwise prepare a warning message
if create.CheckDuplicate {
if !agent || nw.Info().Dynamic() {
return nil, libnetwork.NetworkNameError(create.Name)
}
}
warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID())
}
c := daemon.netController
driver := create.Driver
if driver == "" {
driver = c.Config().Daemon.DefaultDriver
}
nwOptions := []libnetwork.NetworkOption{
libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(create.Options),
libnetwork.NetworkOptionLabels(create.Labels),
libnetwork.NetworkOptionAttachable(create.Attachable),
libnetwork.NetworkOptionIngress(create.Ingress),
libnetwork.NetworkOptionScope(create.Scope),
}
if create.ConfigOnly {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
}
if create.IPAM != nil {
ipam := create.IPAM
v4Conf, v6Conf, err := getIpamConfig(ipam.Config)
if err != nil {
return nil, err
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
}
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
if agent {
nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
}
if create.ConfigFrom != nil {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
if agent && driver == "overlay" {
nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
if !exists {
return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
}
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
// nolint: golint
return nil, errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
}
return nil, err
}
daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire)
if create.IPAM != nil {
daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire)
}
daemon.LogNetworkEvent(n, "create")
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
}, nil
}
func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) {
var builtinDrivers []string
if capability == driverapi.NetworkPluginEndpointType {
builtinDrivers = daemon.netController.BuiltinDrivers()
} else if capability == ipamapi.PluginEndpointType {
builtinDrivers = daemon.netController.BuiltinIPAMDrivers()
}
for _, d := range builtinDrivers {
if d == driver {
return
}
}
if daemon.PluginStore != nil {
_, err := daemon.PluginStore.Get(driver, capability, mode)
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation")
}
}
}
func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) {
ipamV4Cfg := []*libnetwork.IpamConf{}
ipamV6Cfg := []*libnetwork.IpamConf{}
for _, d := range data {
iCfg := libnetwork.IpamConf{}
iCfg.PreferredPool = d.Subnet
iCfg.SubPool = d.IPRange
iCfg.Gateway = d.Gateway
iCfg.AuxAddresses = d.AuxAddress
ip, _, err := net.ParseCIDR(d.Subnet)
if err != nil {
return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err)
}
if ip.To4() != nil {
ipamV4Cfg = append(ipamV4Cfg, &iCfg)
} else {
ipamV6Cfg = append(ipamV6Cfg, &iCfg)
}
}
return ipamV4Cfg, ipamV6Cfg, nil
}
// UpdateContainerServiceConfig updates a service configuration.
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
ctr.NetworkSettings.Service = serviceConfig
return nil
}
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
}
// DisconnectContainerFromNetwork disconnects the given container from
// the given network. If either cannot be found, an err is returned.
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
if force {
return daemon.ForceEndpointDelete(containerName, networkName)
}
return err
}
return daemon.DisconnectFromNetwork(ctr, networkName, force)
}
// GetNetworkDriverList returns the list of plugin drivers
// registered for networks.
func (daemon *Daemon) GetNetworkDriverList() []string {
if !daemon.NetworkControllerEnabled() {
return nil
}
pluginList := daemon.netController.BuiltinDrivers()
managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType)
for _, plugin := range managedPlugins {
pluginList = append(pluginList, plugin.Name())
}
pluginMap := make(map[string]bool)
for _, plugin := range pluginList {
pluginMap[plugin] = true
}
networks := daemon.netController.Networks()
for _, nw := range networks {
if !pluginMap[nw.Type()] {
pluginList = append(pluginList, nw.Type())
pluginMap[nw.Type()] = true
}
}
sort.Strings(pluginList)
return pluginList
}
// DeleteManagedNetwork deletes an agent network.
// The requirement of networkID is enforced.
func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return err
}
return daemon.deleteNetwork(n, true)
}
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return errors.Wrap(err, "could not find network by ID")
}
return daemon.deleteNetwork(n, false)
}
func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error {
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errdefs.Forbidden(err)
}
if dynamic && !nw.Info().Dynamic() {
if runconfig.IsPreDefinedNetwork(nw.Name()) {
// Predefined networks now support swarm services. Make this
// a no-op when cluster requests to remove the predefined network.
return nil
}
err := fmt.Errorf("%s is not a dynamic network", nw.Name())
return errdefs.Forbidden(err)
}
if err := nw.Delete(); err != nil {
return errors.Wrap(err, "error while removing network")
}
// If this is not a configuration only network, we need to
// update the corresponding remote drivers' reference counts
if !nw.Info().ConfigOnly() {
daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release)
ipamType, _, _, _ := nw.Info().IpamConfig()
daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release)
daemon.LogNetworkEvent(nw, "destroy")
}
return nil
}
// GetNetworks returns a list of all networks
func (daemon *Daemon) GetNetworks(filter filters.Args, config types.NetworkListConfig) ([]types.NetworkResource, error) {
networks := daemon.getAllNetworks()
list := make([]types.NetworkResource, 0, len(networks))
var idx map[string]libnetwork.Network
if config.Detailed {
idx = make(map[string]libnetwork.Network)
}
for _, n := range networks {
nr := buildNetworkResource(n)
list = append(list, nr)
if config.Detailed {
idx[nr.ID] = n
}
}
var err error
list, err = internalnetwork.FilterNetworks(list, filter)
if err != nil {
return nil, err
}
if config.Detailed {
for i, n := range list {
np := &n
buildDetailedNetworkResources(np, idx[n.ID], config.Verbose)
list[i] = *np
}
}
return list, nil
}
func buildNetworkResource(nw libnetwork.Network) types.NetworkResource {
r := types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Ingress = info.Ingress()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(&r, info)
r.Labels = info.Labels()
r.ConfigOnly = info.ConfigOnly()
if cn := info.ConfigFrom(); cn != "" {
r.ConfigFrom = network.ConfigReference{Network: cn}
}
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func buildDetailedNetworkResources(r *types.NetworkResource, nw libnetwork.Network, verbose bool) {
if nw == nil {
return
}
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
if ip4Info.IPAMData.Gateway != nil {
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
}
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
// clearAttachableNetworks removes the attachable networks
// after disconnecting any connected container
func (daemon *Daemon) clearAttachableNetworks() {
for _, n := range daemon.getAllNetworks() {
if !n.Info().Attachable() {
continue
}
for _, ep := range n.Endpoints() {
epInfo := ep.Info()
if epInfo == nil {
continue
}
sb := epInfo.Sandbox()
if sb == nil {
continue
}
containerID := sb.ContainerID()
if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil {
logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v",
containerID, n.Name(), err)
}
}
if err := daemon.DeleteManagedNetwork(n.ID()); err != nil {
logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err)
}
}
}
// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
var (
bindings = make(nat.PortMap)
pbList []networktypes.PortBinding
exposeList []networktypes.TransportPort
createOptions []libnetwork.EndpointOption
)
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
if (!serviceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
c.NetworkSettings.IsAnonymousEndpoint {
createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
}
if epConfig != nil {
ipam := epConfig.IPAMConfig
if ipam != nil {
var (
ipList []net.IP
ip, ip6, linkip net.IP
)
for _, ips := range ipam.LinkLocalIPs {
if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs)
}
ipList = append(ipList, linkip)
}
if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address)
}
if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address)
}
createOptions = append(createOptions,
libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
}
for _, alias := range epConfig.Aliases {
createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
}
for k, v := range epConfig.DriverOpts {
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
if c.NetworkSettings.Service != nil {
svcCfg := c.NetworkSettings.Service
var vip string
if svcCfg.VirtualAddresses[n.ID()] != nil {
vip = svcCfg.VirtualAddresses[n.ID()].IPv4
}
var portConfigs []*libnetwork.PortConfig
for _, portConfig := range svcCfg.ExposedPorts {
portConfigs = append(portConfigs, &libnetwork.PortConfig{
Name: portConfig.Name,
Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
}
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}
// configs that are applicable only for the endpoint in the network
	// to which the container was connected on docker run.
// Ideally all these network-specific endpoint configurations must be moved under
// container.NetworkSettings.Networks[n.Name()]
if n.Name() == c.HostConfig.NetworkMode.NetworkName() ||
(n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) {
if c.Config.MacAddress != "" {
mac, err := net.ParseMAC(c.Config.MacAddress)
if err != nil {
return nil, err
}
genericOption := options.Generic{
netlabel.MacAddress: mac,
}
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
}
}
	// Port-mapping rules belong to the container and are applicable only to non-internal networks
portmaps := getSandboxPortMapInfo(sb)
if n.Info().Internal() || len(portmaps) > 0 {
return createOptions, nil
}
if c.HostConfig.PortBindings != nil {
for p, b := range c.HostConfig.PortBindings {
bindings[p] = []nat.PortBinding{}
for _, bb := range b {
bindings[p] = append(bindings[p], nat.PortBinding{
HostIP: bb.HostIP,
HostPort: bb.HostPort,
})
}
}
}
portSpecs := c.Config.ExposedPorts
ports := make([]nat.Port, len(portSpecs))
var i int
for p := range portSpecs {
ports[i] = p
i++
}
nat.SortPortMap(ports, bindings)
for _, port := range ports {
expose := networktypes.TransportPort{}
expose.Proto = networktypes.ParseProtocol(port.Proto())
expose.Port = uint16(port.Int())
exposeList = append(exposeList, expose)
pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto}
binding := bindings[port]
for i := 0; i < len(binding); i++ {
pbCopy := pb.GetCopy()
newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
var portStart, portEnd int
if err == nil {
portStart, portEnd, err = newP.Range()
}
if err != nil {
return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort)
}
pbCopy.HostPort = uint16(portStart)
pbCopy.HostPortEnd = uint16(portEnd)
pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
pbList = append(pbList, pbCopy)
}
if c.HostConfig.PublishAllPorts && len(binding) == 0 {
pbList = append(pbList, pb)
}
}
var dns []string
if len(c.HostConfig.DNS) > 0 {
dns = c.HostConfig.DNS
} else if len(daemonDNS) > 0 {
dns = daemonDNS
}
if len(dns) > 0 {
createOptions = append(createOptions,
libnetwork.CreateOptionDNS(dns))
}
createOptions = append(createOptions,
libnetwork.CreateOptionPortMapping(pbList),
libnetwork.CreateOptionExposedPorts(exposeList))
return createOptions, nil
}
// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
pm := nat.PortMap{}
if sb == nil {
return pm
}
for _, ep := range sb.Endpoints() {
pm, _ = getEndpointPortMapInfo(ep)
if len(pm) > 0 {
break
}
}
return pm
}
func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) {
pm := nat.PortMap{}
driverInfo, err := ep.DriverInfo()
if err != nil {
return pm, err
}
if driverInfo == nil {
		// It is not an error for driverInfo to be nil
return pm, nil
}
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
if exposedPorts, ok := expData.([]networktypes.TransportPort); ok {
for _, tp := range exposedPorts {
natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
if err != nil {
return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err)
}
pm[natPort] = nil
}
}
}
mapData, ok := driverInfo[netlabel.PortMap]
if !ok {
return pm, nil
}
if portMapping, ok := mapData.([]networktypes.PortBinding); ok {
for _, pp := range portMapping {
natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
if err != nil {
return pm, err
}
natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
pm[natPort] = append(pm[natPort], natBndg)
}
}
return pm, nil
}
// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error {
if ep == nil {
return errors.New("endpoint cannot be nil")
}
if networkSettings == nil {
return errors.New("network cannot be nil")
}
epInfo := ep.Info()
if epInfo == nil {
// It is not an error to get an empty endpoint info
return nil
}
if _, ok := networkSettings.Networks[n.Name()]; !ok {
networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{
EndpointSettings: &network.EndpointSettings{},
}
}
networkSettings.Networks[n.Name()].NetworkID = n.ID()
networkSettings.Networks[n.Name()].EndpointID = ep.ID()
iface := epInfo.Iface()
if iface == nil {
return nil
}
if iface.MacAddress() != nil {
networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
}
if iface.Address() != nil {
ones, _ := iface.Address().Mask.Size()
networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
networkSettings.Networks[n.Name()].IPPrefixLen = ones
}
if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
onesv6, _ := iface.AddressIPv6().Mask.Size()
networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
}
return nil
}
// buildJoinOptions builds endpoint Join options from a given network.
func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface {
Name() string
}) ([]libnetwork.EndpointOption, error) {
var joinOptions []libnetwork.EndpointOption
if epConfig, ok := networkSettings.Networks[n.Name()]; ok {
for _, str := range epConfig.Links {
name, alias, err := opts.ParseLink(str)
if err != nil {
return nil, err
}
joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
}
for k, v := range epConfig.DriverOpts {
joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
return joinOptions, nil
}
| package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/container"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
internalnetwork "github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libnetwork"
lncluster "github.com/docker/docker/libnetwork/cluster"
"github.com/docker/docker/libnetwork/driverapi"
"github.com/docker/docker/libnetwork/ipamapi"
"github.com/docker/docker/libnetwork/netlabel"
"github.com/docker/docker/libnetwork/networkdb"
"github.com/docker/docker/libnetwork/options"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PredefinedNetworkError is returned when a user tries to create a predefined network that already exists.
type PredefinedNetworkError string
func (pnr PredefinedNetworkError) Error() string {
return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr))
}
// Forbidden denotes the type of this error
func (pnr PredefinedNetworkError) Forbidden() {}
// NetworkControllerEnabled checks if the networking stack is enabled.
// This feature depends on OS primitives and it's disabled in systems like Windows.
func (daemon *Daemon) NetworkControllerEnabled() bool {
return daemon.netController != nil
}
// NetworkController returns the network controller created by the daemon.
func (daemon *Daemon) NetworkController() libnetwork.NetworkController {
return daemon.netController
}
// FindNetwork returns a network based on:
// 1. Full ID
// 2. Full Name
// 3. Partial ID
// as long as there is no ambiguity
func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) {
listByFullName := []libnetwork.Network{}
listByPartialID := []libnetwork.Network{}
for _, nw := range daemon.getAllNetworks() {
if nw.ID() == term {
return nw, nil
}
if nw.Name() == term {
listByFullName = append(listByFullName, nw)
}
if strings.HasPrefix(nw.ID(), term) {
listByPartialID = append(listByPartialID, nw)
}
}
switch {
case len(listByFullName) == 1:
return listByFullName[0], nil
case len(listByFullName) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName)))
case len(listByPartialID) == 1:
return listByPartialID[0], nil
case len(listByPartialID) > 1:
return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID)))
}
	// Be very careful about changing the error type here; the
// libnetwork.ErrNoSuchNetwork error is used by the controller
// to retry the creation of the network as managed through the swarm manager
return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term))
}
// GetNetworkByID function returns a network whose ID matches the given ID.
// It fails with an error if no matching network is found.
func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, errors.Wrap(libnetwork.ErrNoSuchNetwork(id), "netcontroller is nil")
}
return c.NetworkByID(id)
}
// GetNetworkByName function returns a network for a given network name.
// If no network name is given, the default network is returned.
func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) {
c := daemon.netController
if c == nil {
return nil, libnetwork.ErrNoSuchNetwork(name)
}
if name == "" {
name = c.Config().Daemon.DefaultNetwork
}
return c.NetworkByName(name)
}
// GetNetworksByIDPrefix returns the list of networks whose ID starts with the given partial ID; it may match zero or more networks
func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
list := []libnetwork.Network{}
l := func(nw libnetwork.Network) bool {
if strings.HasPrefix(nw.ID(), partialID) {
list = append(list, nw)
}
return false
}
c.WalkNetworks(l)
return list
}
// getAllNetworks returns a list containing all networks
func (daemon *Daemon) getAllNetworks() []libnetwork.Network {
c := daemon.netController
if c == nil {
return nil
}
return c.Networks()
}
type ingressJob struct {
create *clustertypes.NetworkCreateRequest
ip net.IP
jobDone chan struct{}
}
var (
ingressWorkerOnce sync.Once
ingressJobsChannel chan *ingressJob
ingressID string
)
func (daemon *Daemon) startIngressWorker() {
ingressJobsChannel = make(chan *ingressJob, 100)
go func() {
//nolint: gosimple
for {
select {
case r := <-ingressJobsChannel:
if r.create != nil {
daemon.setupIngress(r.create, r.ip, ingressID)
ingressID = r.create.ID
} else {
daemon.releaseIngress(ingressID)
ingressID = ""
}
close(r.jobDone)
}
}
}()
}
// enqueueIngressJob adds an ingress add/rm request to the worker queue.
// It guarantees the worker is started.
func (daemon *Daemon) enqueueIngressJob(job *ingressJob) {
ingressWorkerOnce.Do(daemon.startIngressWorker)
ingressJobsChannel <- job
}
// SetupIngress sets up ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) {
ip, _, err := net.ParseCIDR(nodeIP)
if err != nil {
return nil, err
}
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{&create, ip, done})
return done, nil
}
// ReleaseIngress releases the ingress networking.
// The function returns a channel which will signal the caller when the programming is completed.
func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) {
done := make(chan struct{})
daemon.enqueueIngressJob(&ingressJob{nil, nil, done})
return done, nil
}
func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) {
controller := daemon.netController
controller.AgentInitWait()
if staleID != "" && staleID != create.ID {
daemon.releaseIngress(staleID)
}
if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil {
		// If it is any error other than an "already exists" error, log it and return.
if _, ok := err.(libnetwork.NetworkNameError); !ok {
logrus.Errorf("Failed creating ingress network: %v", err)
return
}
// Otherwise continue down the call to create or recreate sandbox.
}
_, err := daemon.GetNetworkByID(create.ID)
if err != nil {
logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
}
}
func (daemon *Daemon) releaseIngress(id string) {
controller := daemon.netController
if id == "" {
return
}
n, err := controller.NetworkByID(id)
if err != nil {
logrus.Errorf("failed to retrieve ingress network %s: %v", id, err)
return
}
if err := n.Delete(libnetwork.NetworkDeleteOptionRemoveLB); err != nil {
logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
return
}
}
// SetNetworkBootstrapKeys sets the bootstrap keys.
func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error {
err := daemon.netController.SetKeys(keys)
if err == nil {
		// Upon successful key setting, dispatch the keys-available event
daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable)
}
return err
}
// UpdateAttachment notifies the attacher about the attachment config.
func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil {
return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config)
}
return nil
}
// WaitForDetachment makes the cluster manager wait for detachment of
// the container from the network.
func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error {
if daemon.clusterProvider == nil {
return fmt.Errorf("cluster provider is not initialized")
}
return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID)
}
// CreateManagedNetwork creates an agent network.
func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error {
_, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true)
return err
}
// CreateNetwork creates a network with the given name, driver and other optional parameters
func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) {
resp, err := daemon.createNetwork(create, "", false)
if err != nil {
return nil, err
}
return resp, err
}
func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
if runconfig.IsPreDefinedNetwork(create.Name) {
return nil, PredefinedNetworkError(create.Name)
}
var warning string
nw, err := daemon.GetNetworkByName(create.Name)
if err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
return nil, err
}
}
if nw != nil {
		// check if the user defined CheckDuplicate; if set to true, return an error,
		// otherwise prepare a warning message
if create.CheckDuplicate {
if !agent || nw.Info().Dynamic() {
return nil, libnetwork.NetworkNameError(create.Name)
}
}
warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID())
}
c := daemon.netController
driver := create.Driver
if driver == "" {
driver = c.Config().Daemon.DefaultDriver
}
nwOptions := []libnetwork.NetworkOption{
libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(create.Options),
libnetwork.NetworkOptionLabels(create.Labels),
libnetwork.NetworkOptionAttachable(create.Attachable),
libnetwork.NetworkOptionIngress(create.Ingress),
libnetwork.NetworkOptionScope(create.Scope),
}
if create.ConfigOnly {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly())
}
if create.IPAM != nil {
ipam := create.IPAM
v4Conf, v6Conf, err := getIpamConfig(ipam.Config)
if err != nil {
return nil, err
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options))
}
if create.Internal {
nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork())
}
if agent {
nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic())
nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false))
}
if create.ConfigFrom != nil {
nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network))
}
if agent && driver == "overlay" {
nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
if !exists {
return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
}
nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP))
}
n, err := c.NewNetwork(driver, create.Name, id, nwOptions...)
if err != nil {
if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok {
//nolint: golint
return nil, errors.New("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")
}
return nil, err
}
daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire)
if create.IPAM != nil {
daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire)
}
daemon.LogNetworkEvent(n, "create")
return &types.NetworkCreateResponse{
ID: n.ID(),
Warning: warning,
}, nil
}
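// For illustration, a request along these lines (all values hypothetical):
//
//	daemon.CreateNetwork(types.NetworkCreateRequest{
//		Name: "app-net",
//		NetworkCreate: types.NetworkCreate{
//			Driver:     "overlay",
//			Attachable: true,
//			Labels:     map[string]string{"env": "test"},
//		},
//	})
//
// reaches createNetwork with agent=false and is translated into the
// libnetwork.NetworkOption slice assembled above (driver options, labels,
// attachable, scope, and so on) before c.NewNetwork is called.
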
func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) {
var builtinDrivers []string
if capability == driverapi.NetworkPluginEndpointType {
builtinDrivers = daemon.netController.BuiltinDrivers()
} else if capability == ipamapi.PluginEndpointType {
builtinDrivers = daemon.netController.BuiltinIPAMDrivers()
}
for _, d := range builtinDrivers {
if d == driver {
return
}
}
if daemon.PluginStore != nil {
_, err := daemon.PluginStore.Get(driver, capability, mode)
if err != nil {
logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation")
}
}
}
func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) {
ipamV4Cfg := []*libnetwork.IpamConf{}
ipamV6Cfg := []*libnetwork.IpamConf{}
for _, d := range data {
iCfg := libnetwork.IpamConf{}
iCfg.PreferredPool = d.Subnet
iCfg.SubPool = d.IPRange
iCfg.Gateway = d.Gateway
iCfg.AuxAddresses = d.AuxAddress
ip, _, err := net.ParseCIDR(d.Subnet)
if err != nil {
return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err)
}
if ip.To4() != nil {
ipamV4Cfg = append(ipamV4Cfg, &iCfg)
} else {
ipamV6Cfg = append(ipamV6Cfg, &iCfg)
}
}
return ipamV4Cfg, ipamV6Cfg, nil
}
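// As a sketch (addresses are hypothetical), an IPAM config such as
//
//	[]network.IPAMConfig{
//		{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"},
//		{Subnet: "2001:db8::/64", Gateway: "2001:db8::1"},
//	}
//
// is split by getIpamConfig into one IPv4 libnetwork.IpamConf (PreferredPool
// "172.28.0.0/16", SubPool "172.28.5.0/24") and one IPv6 entry, based on
// whether the subnet address parses as IPv4.
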
// UpdateContainerServiceConfig updates a service configuration.
func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
ctr.NetworkSettings.Service = serviceConfig
return nil
}
// ConnectContainerToNetwork connects the given container to the given
// network. If either cannot be found, an err is returned. If the
// network cannot be set up, an err is returned.
func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
return err
}
return daemon.ConnectToNetwork(ctr, networkName, endpointConfig)
}
// DisconnectContainerFromNetwork disconnects the given container from
// the given network. If either cannot be found, an err is returned.
func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
ctr, err := daemon.GetContainer(containerName)
if err != nil {
if force {
return daemon.ForceEndpointDelete(containerName, networkName)
}
return err
}
return daemon.DisconnectFromNetwork(ctr, networkName, force)
}
// GetNetworkDriverList returns the list of plugin drivers
// registered for networks.
func (daemon *Daemon) GetNetworkDriverList() []string {
if !daemon.NetworkControllerEnabled() {
return nil
}
pluginList := daemon.netController.BuiltinDrivers()
managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType)
for _, plugin := range managedPlugins {
pluginList = append(pluginList, plugin.Name())
}
pluginMap := make(map[string]bool)
for _, plugin := range pluginList {
pluginMap[plugin] = true
}
networks := daemon.netController.Networks()
for _, nw := range networks {
if !pluginMap[nw.Type()] {
pluginList = append(pluginList, nw.Type())
pluginMap[nw.Type()] = true
}
}
sort.Strings(pluginList)
return pluginList
}
// DeleteManagedNetwork deletes an agent network.
// The requirement of networkID is enforced.
func (daemon *Daemon) DeleteManagedNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return err
}
return daemon.deleteNetwork(n, true)
}
// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
func (daemon *Daemon) DeleteNetwork(networkID string) error {
n, err := daemon.GetNetworkByID(networkID)
if err != nil {
return errors.Wrap(err, "could not find network by ID")
}
return daemon.deleteNetwork(n, false)
}
func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error {
if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic {
err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name())
return errdefs.Forbidden(err)
}
if dynamic && !nw.Info().Dynamic() {
if runconfig.IsPreDefinedNetwork(nw.Name()) {
// Predefined networks now support swarm services. Make this
// a no-op when cluster requests to remove the predefined network.
return nil
}
err := fmt.Errorf("%s is not a dynamic network", nw.Name())
return errdefs.Forbidden(err)
}
if err := nw.Delete(); err != nil {
return errors.Wrap(err, "error while removing network")
}
// If this is not a configuration only network, we need to
// update the corresponding remote drivers' reference counts
if !nw.Info().ConfigOnly() {
daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release)
ipamType, _, _, _ := nw.Info().IpamConfig()
daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release)
daemon.LogNetworkEvent(nw, "destroy")
}
return nil
}
// GetNetworks returns a list of all networks
func (daemon *Daemon) GetNetworks(filter filters.Args, config types.NetworkListConfig) ([]types.NetworkResource, error) {
networks := daemon.getAllNetworks()
list := make([]types.NetworkResource, 0, len(networks))
var idx map[string]libnetwork.Network
if config.Detailed {
idx = make(map[string]libnetwork.Network)
}
for _, n := range networks {
nr := buildNetworkResource(n)
list = append(list, nr)
if config.Detailed {
idx[nr.ID] = n
}
}
var err error
list, err = internalnetwork.FilterNetworks(list, filter)
if err != nil {
return nil, err
}
if config.Detailed {
for i := range list {
np := &list[i]
buildDetailedNetworkResources(np, idx[np.ID], config.Verbose)
list[i] = *np
}
}
return list, nil
}
func buildNetworkResource(nw libnetwork.Network) types.NetworkResource {
r := types.NetworkResource{}
if nw == nil {
return r
}
info := nw.Info()
r.Name = nw.Name()
r.ID = nw.ID()
r.Created = info.Created()
r.Scope = info.Scope()
r.Driver = nw.Type()
r.EnableIPv6 = info.IPv6Enabled()
r.Internal = info.Internal()
r.Attachable = info.Attachable()
r.Ingress = info.Ingress()
r.Options = info.DriverOptions()
r.Containers = make(map[string]types.EndpointResource)
buildIpamResources(&r, info)
r.Labels = info.Labels()
r.ConfigOnly = info.ConfigOnly()
if cn := info.ConfigFrom(); cn != "" {
r.ConfigFrom = network.ConfigReference{Network: cn}
}
peers := info.Peers()
if len(peers) != 0 {
r.Peers = buildPeerInfoResources(peers)
}
return r
}
func buildDetailedNetworkResources(r *types.NetworkResource, nw libnetwork.Network, verbose bool) {
if nw == nil {
return
}
epl := nw.Endpoints()
for _, e := range epl {
ei := e.Info()
if ei == nil {
continue
}
sb := ei.Sandbox()
tmpID := e.ID()
key := "ep-" + tmpID
if sb != nil {
key = sb.ContainerID()
}
r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei)
}
if !verbose {
return
}
services := nw.Info().Services()
r.Services = make(map[string]network.ServiceInfo)
for name, service := range services {
tasks := []network.Task{}
for _, t := range service.Tasks {
tasks = append(tasks, network.Task{
Name: t.Name,
EndpointID: t.EndpointID,
EndpointIP: t.EndpointIP,
Info: t.Info,
})
}
r.Services[name] = network.ServiceInfo{
VIP: service.VIP,
Ports: service.Ports,
Tasks: tasks,
LocalLBIndex: service.LocalLBIndex,
}
}
}
func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo {
peerInfo := make([]network.PeerInfo, 0, len(peers))
for _, peer := range peers {
peerInfo = append(peerInfo, network.PeerInfo{
Name: peer.Name,
IP: peer.IP,
})
}
return peerInfo
}
func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) {
id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig()
ipv4Info, ipv6Info := nwInfo.IpamInfo()
r.IPAM.Driver = id
r.IPAM.Options = opts
r.IPAM.Config = []network.IPAMConfig{}
for _, ip4 := range ipv4conf {
if ip4.PreferredPool == "" {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip4.PreferredPool
iData.IPRange = ip4.SubPool
iData.Gateway = ip4.Gateway
iData.AuxAddress = ip4.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if len(r.IPAM.Config) == 0 {
for _, ip4Info := range ipv4Info {
iData := network.IPAMConfig{}
iData.Subnet = ip4Info.IPAMData.Pool.String()
if ip4Info.IPAMData.Gateway != nil {
iData.Gateway = ip4Info.IPAMData.Gateway.IP.String()
}
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
hasIpv6Conf := false
for _, ip6 := range ipv6conf {
if ip6.PreferredPool == "" {
continue
}
hasIpv6Conf = true
iData := network.IPAMConfig{}
iData.Subnet = ip6.PreferredPool
iData.IPRange = ip6.SubPool
iData.Gateway = ip6.Gateway
iData.AuxAddress = ip6.AuxAddresses
r.IPAM.Config = append(r.IPAM.Config, iData)
}
if !hasIpv6Conf {
for _, ip6Info := range ipv6Info {
if ip6Info.IPAMData.Pool == nil {
continue
}
iData := network.IPAMConfig{}
iData.Subnet = ip6Info.IPAMData.Pool.String()
iData.Gateway = ip6Info.IPAMData.Gateway.String()
r.IPAM.Config = append(r.IPAM.Config, iData)
}
}
}
func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource {
er := types.EndpointResource{}
er.EndpointID = id
er.Name = name
ei := info
if ei == nil {
return er
}
if iface := ei.Iface(); iface != nil {
if mac := iface.MacAddress(); mac != nil {
er.MacAddress = mac.String()
}
if ip := iface.Address(); ip != nil && len(ip.IP) > 0 {
er.IPv4Address = ip.String()
}
if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 {
er.IPv6Address = ipv6.String()
}
}
return er
}
// clearAttachableNetworks removes the attachable networks
// after disconnecting any connected container
func (daemon *Daemon) clearAttachableNetworks() {
for _, n := range daemon.getAllNetworks() {
if !n.Info().Attachable() {
continue
}
for _, ep := range n.Endpoints() {
epInfo := ep.Info()
if epInfo == nil {
continue
}
sb := epInfo.Sandbox()
if sb == nil {
continue
}
containerID := sb.ContainerID()
if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil {
logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v",
containerID, n.Name(), err)
}
}
if err := daemon.DeleteManagedNetwork(n.ID()); err != nil {
logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err)
}
}
}
// buildCreateEndpointOptions builds endpoint options from a given network.
func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) {
var (
bindings = make(nat.PortMap)
pbList []networktypes.PortBinding
exposeList []networktypes.TransportPort
createOptions []libnetwork.EndpointOption
)
defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
if (!serviceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) ||
c.NetworkSettings.IsAnonymousEndpoint {
createOptions = append(createOptions, libnetwork.CreateOptionAnonymous())
}
if epConfig != nil {
ipam := epConfig.IPAMConfig
if ipam != nil {
var (
ipList []net.IP
ip, ip6, linkip net.IP
)
for _, ips := range ipam.LinkLocalIPs {
if linkip = net.ParseIP(ips); linkip == nil && ips != "" {
return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs)
}
ipList = append(ipList, linkip)
}
if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" {
return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address)
}
if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" {
return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address)
}
createOptions = append(createOptions,
libnetwork.CreateOptionIpam(ip, ip6, ipList, nil))
}
for _, alias := range epConfig.Aliases {
createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias))
}
for k, v := range epConfig.DriverOpts {
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
if c.NetworkSettings.Service != nil {
svcCfg := c.NetworkSettings.Service
var vip string
if svcCfg.VirtualAddresses[n.ID()] != nil {
vip = svcCfg.VirtualAddresses[n.ID()].IPv4
}
var portConfigs []*libnetwork.PortConfig
for _, portConfig := range svcCfg.ExposedPorts {
portConfigs = append(portConfigs, &libnetwork.PortConfig{
Name: portConfig.Name,
Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol),
TargetPort: portConfig.TargetPort,
PublishedPort: portConfig.PublishedPort,
})
}
createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()]))
}
if !containertypes.NetworkMode(n.Name()).IsUserDefined() {
createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution())
}
// configs that are applicable only for the endpoint in the network
// to which the container was connected on docker run.
// Ideally all these network-specific endpoint configurations should be moved under
// container.NetworkSettings.Networks[n.Name()]
if n.Name() == c.HostConfig.NetworkMode.NetworkName() ||
(n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) {
if c.Config.MacAddress != "" {
mac, err := net.ParseMAC(c.Config.MacAddress)
if err != nil {
return nil, err
}
genericOption := options.Generic{
netlabel.MacAddress: mac,
}
createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
}
}
// Port-mapping rules belong to the container & applicable only to non-internal networks
portmaps := getSandboxPortMapInfo(sb)
if n.Info().Internal() || len(portmaps) > 0 {
return createOptions, nil
}
if c.HostConfig.PortBindings != nil {
for p, b := range c.HostConfig.PortBindings {
bindings[p] = []nat.PortBinding{}
for _, bb := range b {
bindings[p] = append(bindings[p], nat.PortBinding{
HostIP: bb.HostIP,
HostPort: bb.HostPort,
})
}
}
}
portSpecs := c.Config.ExposedPorts
ports := make([]nat.Port, len(portSpecs))
var i int
for p := range portSpecs {
ports[i] = p
i++
}
nat.SortPortMap(ports, bindings)
for _, port := range ports {
expose := networktypes.TransportPort{}
expose.Proto = networktypes.ParseProtocol(port.Proto())
expose.Port = uint16(port.Int())
exposeList = append(exposeList, expose)
pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto}
binding := bindings[port]
for i := 0; i < len(binding); i++ {
pbCopy := pb.GetCopy()
newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
var portStart, portEnd int
if err == nil {
portStart, portEnd, err = newP.Range()
}
if err != nil {
return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort)
}
pbCopy.HostPort = uint16(portStart)
pbCopy.HostPortEnd = uint16(portEnd)
pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
pbList = append(pbList, pbCopy)
}
if c.HostConfig.PublishAllPorts && len(binding) == 0 {
pbList = append(pbList, pb)
}
}
var dns []string
if len(c.HostConfig.DNS) > 0 {
dns = c.HostConfig.DNS
} else if len(daemonDNS) > 0 {
dns = daemonDNS
}
if len(dns) > 0 {
createOptions = append(createOptions,
libnetwork.CreateOptionDNS(dns))
}
createOptions = append(createOptions,
libnetwork.CreateOptionPortMapping(pbList),
libnetwork.CreateOptionExposedPorts(exposeList))
return createOptions, nil
}
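// As a rough example (values hypothetical): a container started with
// "-p 8080:80/tcp" carries c.HostConfig.PortBindings["80/tcp"] =
// []nat.PortBinding{{HostPort: "8080"}}. The loop above turns this into an
// exposed networktypes.TransportPort{Proto: TCP, Port: 80} plus a
// networktypes.PortBinding{Port: 80, HostPort: 8080, HostPortEnd: 8080},
// which are attached to the endpoint via CreateOptionExposedPorts and
// CreateOptionPortMapping.
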
// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox
func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap {
pm := nat.PortMap{}
if sb == nil {
return pm
}
for _, ep := range sb.Endpoints() {
pm, _ = getEndpointPortMapInfo(ep)
if len(pm) > 0 {
break
}
}
return pm
}
func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) {
pm := nat.PortMap{}
driverInfo, err := ep.DriverInfo()
if err != nil {
return pm, err
}
if driverInfo == nil {
// It is not an error for driverInfo to be nil
return pm, nil
}
if expData, ok := driverInfo[netlabel.ExposedPorts]; ok {
if exposedPorts, ok := expData.([]networktypes.TransportPort); ok {
for _, tp := range exposedPorts {
natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port)))
if err != nil {
return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err)
}
pm[natPort] = nil
}
}
}
mapData, ok := driverInfo[netlabel.PortMap]
if !ok {
return pm, nil
}
if portMapping, ok := mapData.([]networktypes.PortBinding); ok {
for _, pp := range portMapping {
natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port)))
if err != nil {
return pm, err
}
natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))}
pm[natPort] = append(pm[natPort], natBndg)
}
}
return pm, nil
}
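// For instance (hypothetical data), a driverInfo map whose netlabel.PortMap
// entry holds []networktypes.PortBinding{{Proto: TCP, Port: 80,
// HostIP: net.IPv4zero, HostPort: 8080}} is returned here as
// nat.PortMap{"80/tcp": {{HostIP: "0.0.0.0", HostPort: "8080"}}}.
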
// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error {
if ep == nil {
return errors.New("endpoint cannot be nil")
}
if networkSettings == nil {
return errors.New("network cannot be nil")
}
epInfo := ep.Info()
if epInfo == nil {
// It is not an error to get an empty endpoint info
return nil
}
if _, ok := networkSettings.Networks[n.Name()]; !ok {
networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{
EndpointSettings: &network.EndpointSettings{},
}
}
networkSettings.Networks[n.Name()].NetworkID = n.ID()
networkSettings.Networks[n.Name()].EndpointID = ep.ID()
iface := epInfo.Iface()
if iface == nil {
return nil
}
if iface.MacAddress() != nil {
networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String()
}
if iface.Address() != nil {
ones, _ := iface.Address().Mask.Size()
networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String()
networkSettings.Networks[n.Name()].IPPrefixLen = ones
}
if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil {
onesv6, _ := iface.AddressIPv6().Mask.Size()
networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String()
networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6
}
return nil
}
// buildJoinOptions builds endpoint Join options from a given network.
func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface {
Name() string
}) ([]libnetwork.EndpointOption, error) {
var joinOptions []libnetwork.EndpointOption
if epConfig, ok := networkSettings.Networks[n.Name()]; ok {
for _, str := range epConfig.Links {
name, alias, err := opts.ParseLink(str)
if err != nil {
return nil, err
}
joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
}
for k, v := range epConfig.DriverOpts {
joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
}
}
return joinOptions, nil
}
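// As an example (names are hypothetical): a legacy link entry such as
// "db:mysql" in epConfig.Links is parsed by opts.ParseLink into name "db"
// and alias "mysql", and applied to the endpoint as
// libnetwork.CreateOptionAlias("db", "mysql").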
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | Yes, makes sense; updated 👍 | thaJeztah | 4,751 |
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | libcontainerd/remote/client.go | package remote // import "github.com/docker/docker/libcontainerd/remote"
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd"
apievents "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/runtime/linux/runctypes"
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd/queue"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
type client struct {
client *containerd.Client
stateDir string
logger *logrus.Entry
ns string
backend libcontainerdtypes.Backend
eventQ queue.Queue
oomMu sync.Mutex
oom map[string]bool
v2runcoptionsMu sync.Mutex
// v2runcoptions is used for copying options specified on Create() to Start()
v2runcoptions map[string]v2runcoptions.Options
}
// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
c := &client{
client: cli,
stateDir: stateDir,
logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
ns: ns,
backend: b,
oom: make(map[string]bool),
v2runcoptions: make(map[string]v2runcoptions.Options),
}
go c.processEventStream(ctx, ns)
return c, nil
}
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
return c.client.Version(ctx)
}
// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, p libcontainerdtypes.Process, err error) {
var dio *cio.DirectIO
defer func() {
if err != nil && dio != nil {
dio.Cancel()
dio.Close()
}
err = wrapError(err)
}()
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
return false, -1, nil, errors.WithStack(wrapError(err))
}
attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
// dio must be assigned to the previously defined dio for the defer above
// to handle cleanup
dio, err = c.newDirectIO(ctx, fifos)
if err != nil {
return nil, err
}
return attachStdio(dio)
}
t, err := ctr.Task(ctx, attachIO)
if err != nil && !containerderrors.IsNotFound(err) {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
}
if t != nil {
s, err := t.Status(ctx)
if err != nil {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting task status")
}
alive = s.Status != containerd.Stopped
pid = int(t.Pid())
}
c.logger.WithFields(logrus.Fields{
"container": id,
"alive": alive,
"pid": pid,
}).Debug("restored container")
return alive, pid, &restoredProcess{
p: t,
}, nil
}
func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
bdir := c.bundleDir(id)
c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
newOpts := []containerd.NewContainerOpts{
containerd.WithSpec(ociSpec),
containerd.WithRuntime(shim, runtimeOptions),
WithBundle(bdir, ociSpec),
}
opts = append(opts, newOpts...)
_, err := c.client.NewContainer(ctx, id, opts...)
if err != nil {
if containerderrors.IsAlreadyExists(err) {
return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return wrapError(err)
}
if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
c.v2runcoptionsMu.Lock()
c.v2runcoptions[id] = *x
c.v2runcoptionsMu.Unlock()
}
return nil
}
// Start creates and starts a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, id)
if err != nil {
return -1, err
}
var (
cp *types.Descriptor
t containerd.Task
rio cio.IO
stdinCloseSync = make(chan struct{})
)
if checkpointDir != "" {
// write checkpoint to the content store
tar := archive.Diff(ctx, "", checkpointDir)
cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
// remove the checkpoint when we're done
defer func() {
if cp != nil {
err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"ref": checkpointDir,
"digest": cp.Digest,
}).Warnf("failed to delete temporary checkpoint entry")
}
}
}()
if err := tar.Close(); err != nil {
return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
}
if err != nil {
return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
}
}
spec, err := ctr.Spec(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve spec")
}
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve labels")
}
bundle := labels[DockerContainerBundlePath]
uid, gid := getSpecUser(spec)
taskOpts := []containerd.NewTaskOpts{
func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
info.Checkpoint = cp
return nil
},
}
if runtime.GOOS != "windows" {
taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
c.v2runcoptionsMu.Lock()
opts, ok := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if ok {
opts.IoUid = uint32(uid)
opts.IoGid = uint32(gid)
info.Options = &opts
} else {
info.Options = &runctypes.CreateOptions{
IoUid: uint32(uid),
IoGid: uint32(gid),
NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
}
}
return nil
})
} else {
taskOpts = append(taskOpts, withLogLevel(c.logger.Level))
}
t, err = ctr.NewTask(ctx,
func(id string) (cio.IO, error) {
fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
return rio, err
},
taskOpts...,
)
if err != nil {
close(stdinCloseSync)
if rio != nil {
rio.Cancel()
rio.Close()
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err := t.Start(ctx); err != nil {
if _, err := t.Delete(ctx); err != nil {
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
return -1, wrapError(err)
}
return int(t.Pid()), nil
}
// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create the stdin fifo if needed. But
// for the container's main process, the stdin fifo will be created in Create, not
// in the Start call. The stdinCloseSync channel should be closed after starting the
// exec process.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return -1, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
}
return -1, wrapError(err)
}
var (
p containerd.Process
rio cio.IO
stdinCloseSync = make(chan struct{})
)
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, wrapError(err)
}
fifos := newFIFOSet(labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)
defer func() {
if err != nil {
if rio != nil {
rio.Cancel()
rio.Close()
}
}
}()
p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
if err != nil {
close(stdinCloseSync)
if containerderrors.IsAlreadyExists(err) {
return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
//
// the stdin of exec process will be created after p.Start in containerd
defer close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
// older containerd-shim
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
defer cancel()
p.Delete(ctx)
return -1, wrapError(err)
}
return int(p.Pid()), nil
}
func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.Resize(ctx, uint32(width), uint32(height))
}
func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.CloseIO(ctx, containerd.WithStdinCloser)
}
func (c *client) Pause(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return wrapError(p.(containerd.Task).Pause(ctx))
}
func (c *client) Resume(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return p.(containerd.Task).Resume(ctx)
}
func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
m, err := p.(containerd.Task).Metrics(ctx)
if err != nil {
return nil, err
}
v, err := typeurl.UnmarshalAny(m.Data)
if err != nil {
return nil, err
}
return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}
func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var pids []uint32
for _, i := range pis {
pids = append(pids, i.Pid)
}
return pids, nil
}
func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var infos []libcontainerdtypes.Summary
for _, pi := range pis {
i, err := typeurl.UnmarshalAny(pi.Info)
if err != nil {
return nil, errors.Wrap(err, "unable to decode process details")
}
s, err := summaryFromInterface(i)
if err != nil {
return nil, err
}
infos = append(infos, *s)
}
return infos, nil
}
type restoredProcess struct {
p containerd.Process
}
func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
if p.p == nil {
return 255, time.Now(), nil
}
status, err := p.p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return 255, time.Now(), nil
}
status, err := p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) Delete(ctx context.Context, containerID string) error {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return err
}
labels, err := ctr.Labels(ctx)
if err != nil {
return err
}
bundle := labels[DockerContainerBundlePath]
if err := ctr.Delete(ctx); err != nil {
return wrapError(err)
}
c.oomMu.Lock()
delete(c.oom, containerID)
c.oomMu.Unlock()
c.v2runcoptionsMu.Lock()
delete(c.v2runcoptions, containerID)
c.v2runcoptionsMu.Unlock()
if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
if err := os.RemoveAll(bundle); err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": containerID,
"bundle": bundle,
}).Error("failed to remove state dir")
}
}
return nil
}
func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
t, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return containerd.Unknown, err
}
s, err := t.Status(ctx)
if err != nil {
return containerd.Unknown, wrapError(err)
}
return s.Status, nil
}
func (c *client) getCheckpointOptions(id string, exit bool) containerd.CheckpointTaskOpts {
return func(r *containerd.CheckpointTaskInfo) error {
if r.Options == nil {
c.v2runcoptionsMu.Lock()
_, isV2 := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if isV2 {
r.Options = &v2runcoptions.CheckpointOptions{Exit: exit}
} else {
r.Options = &runctypes.CheckpointOptions{Exit: exit}
}
return nil
}
switch opts := r.Options.(type) {
case *v2runcoptions.CheckpointOptions:
opts.Exit = exit
case *runctypes.CheckpointOptions:
opts.Exit = exit
}
return nil
}
}
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
opts := []containerd.CheckpointTaskOpts{c.getCheckpointOptions(containerID, exit)}
img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
if err != nil {
return wrapError(err)
}
// Whatever happens, delete the checkpoint from containerd
defer func() {
err := c.client.ImageService().Delete(context.Background(), img.Name())
if err != nil {
c.logger.WithError(err).WithField("digest", img.Target().Digest).
Warnf("failed to delete checkpoint image")
}
}()
b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
}
var index v1.Index
if err := json.Unmarshal(b, &index); err != nil {
return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
}
var cpDesc *v1.Descriptor
for _, m := range index.Manifests {
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
cpDesc = &m // nolint:gosec
break
}
}
if cpDesc == nil {
return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
}
rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
}
defer rat.Close()
_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
}
return err
}
func (c *client) getContainer(ctx context.Context, id string) (containerd.Container, error) {
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
}
return nil, wrapError(err)
}
return ctr, nil
}
func (c *client) getProcess(ctx context.Context, containerID, processID string) (containerd.Process, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return nil, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
}
return nil, wrapError(err)
}
if processID == libcontainerdtypes.InitProcessName {
return t, nil
}
p, err := t.LoadProcess(ctx, processID, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
}
return nil, wrapError(err)
}
return p, nil
}
// createIO creates the io to be used by a process.
// This needs to get a pointer to the interface because, at the time of the closure,
// the process may not have been registered yet.
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
var (
io *cio.DirectIO
err error
)
io, err = c.newDirectIO(context.Background(), fifos)
if err != nil {
return nil, err
}
if io.Stdin != nil {
var (
err error
stdinOnce sync.Once
)
pipe := io.Stdin
io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
stdinOnce.Do(func() {
err = pipe.Close()
// Do the rest in a new routine to avoid a deadlock if the
// Exec/Start call failed.
go func() {
<-stdinCloseSync
p, err := c.getProcess(context.Background(), containerID, processID)
if err == nil {
err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
if err != nil && strings.Contains(err.Error(), "transport is closing") {
err = nil
}
}
}()
})
return err
})
}
rio, err := attachStdio(io)
if err != nil {
io.Cancel()
io.Close()
}
return rio, err
}
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
c.eventQ.Append(ei.ContainerID, func() {
err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"event": et,
"event-info": ei,
}).Error("failed to process event")
}
if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
p, err := c.getProcess(ctx, ei.ContainerID, ei.ProcessID)
if err != nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
"error": err,
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Error("exit event")
return
}
ctr, err := c.getContainer(ctx, ei.ContainerID)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to find container")
} else {
labels, err := ctr.Labels(ctx)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to get container labels")
return
}
newFIFOSet(labels[DockerContainerBundlePath], ei.ProcessID, true, false).Close()
}
_, err = p.Delete(context.Background())
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
}
})
}
func (c *client) waitServe(ctx context.Context) bool {
t := 100 * time.Millisecond
delay := time.NewTimer(t)
if !delay.Stop() {
<-delay.C
}
defer delay.Stop()
// `IsServing` will actually block until the service is ready.
// However it can return early, so we'll loop with a delay to handle it.
for {
serving, err := c.client.IsServing(ctx)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return false
}
logrus.WithError(err).Warn("Error while testing if containerd API is ready")
}
if serving {
return true
}
delay.Reset(t)
select {
case <-ctx.Done():
return false
case <-delay.C:
}
}
}
func (c *client) processEventStream(ctx context.Context, ns string) {
var (
err error
ev *events.Envelope
et libcontainerdtypes.EventType
ei libcontainerdtypes.EventInfo
)
// Create a new context specifically for this subscription.
// The context must be cancelled to cancel the subscription.
// In cases where we have to restart event stream processing,
// we'll need the original context b/c this one will be cancelled
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Filter on both namespace *and* topic. To create an "and" filter,
// this must be a single, comma-separated string
eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
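// With a namespace of, say, "moby", the resulting filter string reads
// "namespace==moby,topic~=|^/tasks/|".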
c.logger.Debug("processing event stream")
for {
var oomKilled bool
select {
case err = <-errC:
if err != nil {
errStatus, ok := status.FromError(err)
if !ok || errStatus.Code() != codes.Canceled {
c.logger.WithError(err).Error("Failed to get event")
c.logger.Info("Waiting for containerd to be ready to restart event processing")
if c.waitServe(ctx) {
go c.processEventStream(ctx, ns)
return
}
}
c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
}
return
case ev = <-eventStream:
if ev.Event == nil {
c.logger.WithField("event", ev).Warn("invalid event")
continue
}
v, err := typeurl.UnmarshalAny(ev.Event)
if err != nil {
c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
continue
}
c.logger.WithField("topic", ev.Topic).Debug("event")
switch t := v.(type) {
case *apievents.TaskCreate:
et = libcontainerdtypes.EventCreate
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskStart:
et = libcontainerdtypes.EventStart
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskExit:
et = libcontainerdtypes.EventExit
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ID,
Pid: t.Pid,
ExitCode: t.ExitStatus,
ExitedAt: t.ExitedAt,
}
case *apievents.TaskOOM:
et = libcontainerdtypes.EventOOM
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
OOMKilled: true,
}
oomKilled = true
case *apievents.TaskExecAdded:
et = libcontainerdtypes.EventExecAdded
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
}
case *apievents.TaskExecStarted:
et = libcontainerdtypes.EventExecStarted
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
Pid: t.Pid,
}
case *apievents.TaskPaused:
et = libcontainerdtypes.EventPaused
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskResumed:
et = libcontainerdtypes.EventResumed
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskDelete:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t),
"container": t.ContainerID},
).Info("ignoring event")
continue
default:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t)},
).Info("ignoring event")
continue
}
c.oomMu.Lock()
if oomKilled {
c.oom[ei.ContainerID] = true
}
ei.OOMKilled = c.oom[ei.ContainerID]
c.oomMu.Unlock()
c.processEvent(ctx, et, ei)
}
}
}
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
}
defer writer.Close()
size, err := io.Copy(writer, r)
if err != nil {
return nil, err
}
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
return nil, err
}
return &types.Descriptor{
MediaType: mediaType,
Digest: writer.Digest(),
Size_: size,
}, nil
}
func (c *client) bundleDir(id string) string {
return filepath.Join(c.stateDir, id)
}
func wrapError(err error) error {
switch {
case err == nil:
return nil
case containerderrors.IsNotFound(err):
return errdefs.NotFound(err)
}
msg := err.Error()
for _, s := range []string{"container does not exist", "not found", "no such container"} {
if strings.Contains(msg, s) {
return errdefs.NotFound(err)
}
}
return err
}
| package remote // import "github.com/docker/docker/libcontainerd/remote"
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd"
apievents "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/runtime/linux/runctypes"
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd/queue"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
type client struct {
client *containerd.Client
stateDir string
logger *logrus.Entry
ns string
backend libcontainerdtypes.Backend
eventQ queue.Queue
oomMu sync.Mutex
oom map[string]bool
v2runcoptionsMu sync.Mutex
// v2runcoptions is used for copying options specified on Create() to Start()
v2runcoptions map[string]v2runcoptions.Options
}
// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
c := &client{
client: cli,
stateDir: stateDir,
logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
ns: ns,
backend: b,
oom: make(map[string]bool),
v2runcoptions: make(map[string]v2runcoptions.Options),
}
go c.processEventStream(ctx, ns)
return c, nil
}
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
return c.client.Version(ctx)
}
// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, p libcontainerdtypes.Process, err error) {
var dio *cio.DirectIO
defer func() {
if err != nil && dio != nil {
dio.Cancel()
dio.Close()
}
err = wrapError(err)
}()
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
return false, -1, nil, errors.WithStack(wrapError(err))
}
attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
// dio must be assigned to the previously defined dio for the defer above
// to handle cleanup
dio, err = c.newDirectIO(ctx, fifos)
if err != nil {
return nil, err
}
return attachStdio(dio)
}
t, err := ctr.Task(ctx, attachIO)
if err != nil && !containerderrors.IsNotFound(err) {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
}
if t != nil {
s, err := t.Status(ctx)
if err != nil {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting task status")
}
alive = s.Status != containerd.Stopped
pid = int(t.Pid())
}
c.logger.WithFields(logrus.Fields{
"container": id,
"alive": alive,
"pid": pid,
}).Debug("restored container")
return alive, pid, &restoredProcess{
p: t,
}, nil
}
func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
bdir := c.bundleDir(id)
c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
newOpts := []containerd.NewContainerOpts{
containerd.WithSpec(ociSpec),
containerd.WithRuntime(shim, runtimeOptions),
WithBundle(bdir, ociSpec),
}
opts = append(opts, newOpts...)
_, err := c.client.NewContainer(ctx, id, opts...)
if err != nil {
if containerderrors.IsAlreadyExists(err) {
return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return wrapError(err)
}
if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
c.v2runcoptionsMu.Lock()
c.v2runcoptions[id] = *x
c.v2runcoptionsMu.Unlock()
}
return nil
}
// Start creates and starts a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, id)
if err != nil {
return -1, err
}
var (
cp *types.Descriptor
t containerd.Task
rio cio.IO
stdinCloseSync = make(chan struct{})
)
if checkpointDir != "" {
// write checkpoint to the content store
tar := archive.Diff(ctx, "", checkpointDir)
cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
// remove the checkpoint when we're done
defer func() {
if cp != nil {
err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"ref": checkpointDir,
"digest": cp.Digest,
}).Warnf("failed to delete temporary checkpoint entry")
}
}
}()
if err := tar.Close(); err != nil {
return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
}
if err != nil {
return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
}
}
spec, err := ctr.Spec(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve spec")
}
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve labels")
}
bundle := labels[DockerContainerBundlePath]
uid, gid := getSpecUser(spec)
taskOpts := []containerd.NewTaskOpts{
func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
info.Checkpoint = cp
return nil
},
}
if runtime.GOOS != "windows" {
taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
c.v2runcoptionsMu.Lock()
opts, ok := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if ok {
opts.IoUid = uint32(uid)
opts.IoGid = uint32(gid)
info.Options = &opts
} else {
info.Options = &runctypes.CreateOptions{
IoUid: uint32(uid),
IoGid: uint32(gid),
NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
}
}
return nil
})
} else {
taskOpts = append(taskOpts, withLogLevel(c.logger.Level))
}
t, err = ctr.NewTask(ctx,
func(id string) (cio.IO, error) {
fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
return rio, err
},
taskOpts...,
)
if err != nil {
close(stdinCloseSync)
if rio != nil {
rio.Cancel()
rio.Close()
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err := t.Start(ctx); err != nil {
if _, err := t.Delete(ctx); err != nil {
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
return -1, wrapError(err)
}
return int(t.Pid()), nil
}
// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create the stdin fifo if needed. But
// for the container's main process, the stdin fifo will be created in Create, not
// in the Start call. The stdinCloseSync channel should be closed after starting the
// exec process.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return -1, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
}
return -1, wrapError(err)
}
var (
p containerd.Process
rio cio.IO
stdinCloseSync = make(chan struct{})
)
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, wrapError(err)
}
fifos := newFIFOSet(labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)
defer func() {
if err != nil {
if rio != nil {
rio.Cancel()
rio.Close()
}
}
}()
p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
if err != nil {
close(stdinCloseSync)
if containerderrors.IsAlreadyExists(err) {
return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
//
// the stdin of exec process will be created after p.Start in containerd
defer close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
// older containerd-shim
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
defer cancel()
p.Delete(ctx)
return -1, wrapError(err)
}
return int(p.Pid()), nil
}
func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.Resize(ctx, uint32(width), uint32(height))
}
func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.CloseIO(ctx, containerd.WithStdinCloser)
}
func (c *client) Pause(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return wrapError(p.(containerd.Task).Pause(ctx))
}
func (c *client) Resume(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return p.(containerd.Task).Resume(ctx)
}
func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
m, err := p.(containerd.Task).Metrics(ctx)
if err != nil {
return nil, err
}
v, err := typeurl.UnmarshalAny(m.Data)
if err != nil {
return nil, err
}
return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}
func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var pids []uint32
for _, i := range pis {
pids = append(pids, i.Pid)
}
return pids, nil
}
func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var infos []libcontainerdtypes.Summary
for _, pi := range pis {
i, err := typeurl.UnmarshalAny(pi.Info)
if err != nil {
return nil, errors.Wrap(err, "unable to decode process details")
}
s, err := summaryFromInterface(i)
if err != nil {
return nil, err
}
infos = append(infos, *s)
}
return infos, nil
}
type restoredProcess struct {
p containerd.Process
}
func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
if p.p == nil {
return 255, time.Now(), nil
}
status, err := p.p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return 255, time.Now(), nil
}
status, err := p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) Delete(ctx context.Context, containerID string) error {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return err
}
labels, err := ctr.Labels(ctx)
if err != nil {
return err
}
bundle := labels[DockerContainerBundlePath]
if err := ctr.Delete(ctx); err != nil {
return wrapError(err)
}
c.oomMu.Lock()
delete(c.oom, containerID)
c.oomMu.Unlock()
c.v2runcoptionsMu.Lock()
delete(c.v2runcoptions, containerID)
c.v2runcoptionsMu.Unlock()
if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
if err := os.RemoveAll(bundle); err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": containerID,
"bundle": bundle,
}).Error("failed to remove state dir")
}
}
return nil
}
func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
t, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return containerd.Unknown, err
}
s, err := t.Status(ctx)
if err != nil {
return containerd.Unknown, wrapError(err)
}
return s.Status, nil
}
func (c *client) getCheckpointOptions(id string, exit bool) containerd.CheckpointTaskOpts {
return func(r *containerd.CheckpointTaskInfo) error {
if r.Options == nil {
c.v2runcoptionsMu.Lock()
_, isV2 := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if isV2 {
r.Options = &v2runcoptions.CheckpointOptions{Exit: exit}
} else {
r.Options = &runctypes.CheckpointOptions{Exit: exit}
}
return nil
}
switch opts := r.Options.(type) {
case *v2runcoptions.CheckpointOptions:
opts.Exit = exit
case *runctypes.CheckpointOptions:
opts.Exit = exit
}
return nil
}
}
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
opts := []containerd.CheckpointTaskOpts{c.getCheckpointOptions(containerID, exit)}
img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
if err != nil {
return wrapError(err)
}
// Whatever happens, delete the checkpoint from containerd
defer func() {
err := c.client.ImageService().Delete(context.Background(), img.Name())
if err != nil {
c.logger.WithError(err).WithField("digest", img.Target().Digest).
Warnf("failed to delete checkpoint image")
}
}()
b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
}
var index v1.Index
if err := json.Unmarshal(b, &index); err != nil {
return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
}
var cpDesc *v1.Descriptor
for _, m := range index.Manifests {
m := m
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
cpDesc = &m // nolint:gosec
break
}
}
if cpDesc == nil {
return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
}
rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
}
defer rat.Close()
_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
}
return err
}
func (c *client) getContainer(ctx context.Context, id string) (containerd.Container, error) {
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
}
return nil, wrapError(err)
}
return ctr, nil
}
func (c *client) getProcess(ctx context.Context, containerID, processID string) (containerd.Process, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return nil, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
}
return nil, wrapError(err)
}
if processID == libcontainerdtypes.InitProcessName {
return t, nil
}
p, err := t.LoadProcess(ctx, processID, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
}
return nil, wrapError(err)
}
return p, nil
}
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
var (
io *cio.DirectIO
err error
)
io, err = c.newDirectIO(context.Background(), fifos)
if err != nil {
return nil, err
}
if io.Stdin != nil {
var (
err error
stdinOnce sync.Once
)
pipe := io.Stdin
io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
stdinOnce.Do(func() {
err = pipe.Close()
// Do the rest in a new routine to avoid a deadlock if the
// Exec/Start call failed.
go func() {
<-stdinCloseSync
p, err := c.getProcess(context.Background(), containerID, processID)
if err == nil {
err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
if err != nil && strings.Contains(err.Error(), "transport is closing") {
err = nil
}
}
}()
})
return err
})
}
rio, err := attachStdio(io)
if err != nil {
io.Cancel()
io.Close()
}
return rio, err
}
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
c.eventQ.Append(ei.ContainerID, func() {
err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"event": et,
"event-info": ei,
}).Error("failed to process event")
}
if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
p, err := c.getProcess(ctx, ei.ContainerID, ei.ProcessID)
if err != nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
"error": err,
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Error("exit event")
return
}
ctr, err := c.getContainer(ctx, ei.ContainerID)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to find container")
} else {
labels, err := ctr.Labels(ctx)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to get container labels")
return
}
newFIFOSet(labels[DockerContainerBundlePath], ei.ProcessID, true, false).Close()
}
_, err = p.Delete(context.Background())
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
}
})
}
func (c *client) waitServe(ctx context.Context) bool {
t := 100 * time.Millisecond
delay := time.NewTimer(t)
if !delay.Stop() {
<-delay.C
}
defer delay.Stop()
// `IsServing` will actually block until the service is ready.
// However it can return early, so we'll loop with a delay to handle it.
for {
serving, err := c.client.IsServing(ctx)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return false
}
logrus.WithError(err).Warn("Error while testing if containerd API is ready")
}
if serving {
return true
}
delay.Reset(t)
select {
case <-ctx.Done():
return false
case <-delay.C:
}
}
}
func (c *client) processEventStream(ctx context.Context, ns string) {
var (
err error
ev *events.Envelope
et libcontainerdtypes.EventType
ei libcontainerdtypes.EventInfo
)
// Create a new context specifically for this subscription.
// The context must be cancelled to cancel the subscription.
// In cases where we have to restart event stream processing,
// we'll need the original context b/c this one will be cancelled
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Filter on both namespace *and* topic. To create an "and" filter,
// this must be a single, comma-separated string
eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
c.logger.Debug("processing event stream")
for {
var oomKilled bool
select {
case err = <-errC:
if err != nil {
errStatus, ok := status.FromError(err)
if !ok || errStatus.Code() != codes.Canceled {
c.logger.WithError(err).Error("Failed to get event")
c.logger.Info("Waiting for containerd to be ready to restart event processing")
if c.waitServe(ctx) {
go c.processEventStream(ctx, ns)
return
}
}
c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
}
return
case ev = <-eventStream:
if ev.Event == nil {
c.logger.WithField("event", ev).Warn("invalid event")
continue
}
v, err := typeurl.UnmarshalAny(ev.Event)
if err != nil {
c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
continue
}
c.logger.WithField("topic", ev.Topic).Debug("event")
switch t := v.(type) {
case *apievents.TaskCreate:
et = libcontainerdtypes.EventCreate
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskStart:
et = libcontainerdtypes.EventStart
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskExit:
et = libcontainerdtypes.EventExit
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ID,
Pid: t.Pid,
ExitCode: t.ExitStatus,
ExitedAt: t.ExitedAt,
}
case *apievents.TaskOOM:
et = libcontainerdtypes.EventOOM
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
OOMKilled: true,
}
oomKilled = true
case *apievents.TaskExecAdded:
et = libcontainerdtypes.EventExecAdded
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
}
case *apievents.TaskExecStarted:
et = libcontainerdtypes.EventExecStarted
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
Pid: t.Pid,
}
case *apievents.TaskPaused:
et = libcontainerdtypes.EventPaused
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskResumed:
et = libcontainerdtypes.EventResumed
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskDelete:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t),
"container": t.ContainerID},
).Info("ignoring event")
continue
default:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t)},
).Info("ignoring event")
continue
}
c.oomMu.Lock()
if oomKilled {
c.oom[ei.ContainerID] = true
}
ei.OOMKilled = c.oom[ei.ContainerID]
c.oomMu.Unlock()
c.processEvent(ctx, et, ei)
}
}
}
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
}
defer writer.Close()
size, err := io.Copy(writer, r)
if err != nil {
return nil, err
}
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
return nil, err
}
return &types.Descriptor{
MediaType: mediaType,
Digest: writer.Digest(),
Size_: size,
}, nil
}
func (c *client) bundleDir(id string) string {
return filepath.Join(c.stateDir, id)
}
func wrapError(err error) error {
switch {
case err == nil:
return nil
case containerderrors.IsNotFound(err):
return errdefs.NotFound(err)
}
msg := err.Error()
for _, s := range []string{"container does not exist", "not found", "no such container"} {
if strings.Contains(msg, s) {
return errdefs.NotFound(err)
}
}
return err
}
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | nit: `m := m` would work just as well here and wouldn't require a variable called `m2` | samuelkarp | 4,752 |
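The `m := m` idiom referenced in the review comment above is Go's per-iteration copy of a range variable: on Go versions before 1.22 the loop variable is a single variable reused on every iteration, so taking its address (as `cpDesc = &m` does in the diff below, which is also what gosec's implicit-aliasing check warns about) can leave a pointer to a value that later iterations overwrite. The following standalone sketch illustrates the difference using a hypothetical `item` type; it is an illustration of the idiom, not code from the moby diff.

```go
package main

import "fmt"

type item struct{ Name string }

func main() {
	items := []item{{"a"}, {"b"}, {"c"}}

	// Without a copy, &it aliases the single loop variable, so on Go < 1.22
	// every stored pointer ends up referring to the last element.
	var aliased []*item
	for _, it := range items {
		aliased = append(aliased, &it)
	}

	// Shadowing the variable first ("it := it", the same shape as "m := m"
	// in the diff) gives each iteration its own value to point at.
	var copied []*item
	for _, it := range items {
		it := it
		copied = append(copied, &it)
	}

	fmt.Println(aliased[0].Name, aliased[1].Name, aliased[2].Name) // "c c c" on Go < 1.22
	fmt.Println(copied[0].Name, copied[1].Name, copied[2].Name)   // "a b c"
}
```

In the checkpoint-manifest loop itself the address is taken right before a `break`, so the copy is mainly about satisfying the aliasing lint and keeping the code safe if the loop ever stops exiting early.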
moby/moby | 42,445 | [testing] ~update~ fix linting issues found by golangci-lint v1.40.1 | Splitting this from https://github.com/moby/moby/pull/42303, which needs some work (use a custom action instead of the default one; similar to buildkit CI)
This ~updates~ fixes linting issues found by the latest golangci-lint (v1.40.1) | null | 2021-05-31 12:10:41+00:00 | 2021-06-16 20:15:01+00:00 | libcontainerd/remote/client.go | package remote // import "github.com/docker/docker/libcontainerd/remote"
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd"
apievents "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/runtime/linux/runctypes"
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd/queue"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
type client struct {
client *containerd.Client
stateDir string
logger *logrus.Entry
ns string
backend libcontainerdtypes.Backend
eventQ queue.Queue
oomMu sync.Mutex
oom map[string]bool
v2runcoptionsMu sync.Mutex
// v2runcoptions is used for copying options specified on Create() to Start()
v2runcoptions map[string]v2runcoptions.Options
}
// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
c := &client{
client: cli,
stateDir: stateDir,
logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
ns: ns,
backend: b,
oom: make(map[string]bool),
v2runcoptions: make(map[string]v2runcoptions.Options),
}
go c.processEventStream(ctx, ns)
return c, nil
}
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
return c.client.Version(ctx)
}
// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, p libcontainerdtypes.Process, err error) {
var dio *cio.DirectIO
defer func() {
if err != nil && dio != nil {
dio.Cancel()
dio.Close()
}
err = wrapError(err)
}()
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
return false, -1, nil, errors.WithStack(wrapError(err))
}
attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
// dio must be assigned to the previously defined dio for the defer above
// to handle cleanup
dio, err = c.newDirectIO(ctx, fifos)
if err != nil {
return nil, err
}
return attachStdio(dio)
}
t, err := ctr.Task(ctx, attachIO)
if err != nil && !containerderrors.IsNotFound(err) {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
}
if t != nil {
s, err := t.Status(ctx)
if err != nil {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting task status")
}
alive = s.Status != containerd.Stopped
pid = int(t.Pid())
}
c.logger.WithFields(logrus.Fields{
"container": id,
"alive": alive,
"pid": pid,
}).Debug("restored container")
return alive, pid, &restoredProcess{
p: t,
}, nil
}
func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
bdir := c.bundleDir(id)
c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
newOpts := []containerd.NewContainerOpts{
containerd.WithSpec(ociSpec),
containerd.WithRuntime(shim, runtimeOptions),
WithBundle(bdir, ociSpec),
}
opts = append(opts, newOpts...)
_, err := c.client.NewContainer(ctx, id, opts...)
if err != nil {
if containerderrors.IsAlreadyExists(err) {
return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return wrapError(err)
}
if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
c.v2runcoptionsMu.Lock()
c.v2runcoptions[id] = *x
c.v2runcoptionsMu.Unlock()
}
return nil
}
// Start create and start a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, id)
if err != nil {
return -1, err
}
var (
cp *types.Descriptor
t containerd.Task
rio cio.IO
stdinCloseSync = make(chan struct{})
)
if checkpointDir != "" {
// write checkpoint to the content store
tar := archive.Diff(ctx, "", checkpointDir)
cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
// remove the checkpoint when we're done
defer func() {
if cp != nil {
err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"ref": checkpointDir,
"digest": cp.Digest,
}).Warnf("failed to delete temporary checkpoint entry")
}
}
}()
if err := tar.Close(); err != nil {
return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
}
if err != nil {
return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
}
}
spec, err := ctr.Spec(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve spec")
}
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve labels")
}
bundle := labels[DockerContainerBundlePath]
uid, gid := getSpecUser(spec)
taskOpts := []containerd.NewTaskOpts{
func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
info.Checkpoint = cp
return nil
},
}
if runtime.GOOS != "windows" {
taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
c.v2runcoptionsMu.Lock()
opts, ok := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if ok {
opts.IoUid = uint32(uid)
opts.IoGid = uint32(gid)
info.Options = &opts
} else {
info.Options = &runctypes.CreateOptions{
IoUid: uint32(uid),
IoGid: uint32(gid),
NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
}
}
return nil
})
} else {
taskOpts = append(taskOpts, withLogLevel(c.logger.Level))
}
t, err = ctr.NewTask(ctx,
func(id string) (cio.IO, error) {
fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
return rio, err
},
taskOpts...,
)
if err != nil {
close(stdinCloseSync)
if rio != nil {
rio.Cancel()
rio.Close()
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err := t.Start(ctx); err != nil {
if _, err := t.Delete(ctx); err != nil {
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
return -1, wrapError(err)
}
return int(t.Pid()), nil
}
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return -1, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
}
return -1, wrapError(err)
}
var (
p containerd.Process
rio cio.IO
stdinCloseSync = make(chan struct{})
)
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, wrapError(err)
}
fifos := newFIFOSet(labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)
defer func() {
if err != nil {
if rio != nil {
rio.Cancel()
rio.Close()
}
}
}()
p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
if err != nil {
close(stdinCloseSync)
if containerderrors.IsAlreadyExists(err) {
return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
//
// the stdin of exec process will be created after p.Start in containerd
defer close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
// older containerd-shim
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
defer cancel()
p.Delete(ctx)
return -1, wrapError(err)
}
return int(p.Pid()), nil
}
func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.Resize(ctx, uint32(width), uint32(height))
}
func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.CloseIO(ctx, containerd.WithStdinCloser)
}
func (c *client) Pause(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return wrapError(p.(containerd.Task).Pause(ctx))
}
func (c *client) Resume(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return p.(containerd.Task).Resume(ctx)
}
func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
m, err := p.(containerd.Task).Metrics(ctx)
if err != nil {
return nil, err
}
v, err := typeurl.UnmarshalAny(m.Data)
if err != nil {
return nil, err
}
return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}
func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var pids []uint32
for _, i := range pis {
pids = append(pids, i.Pid)
}
return pids, nil
}
func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var infos []libcontainerdtypes.Summary
for _, pi := range pis {
i, err := typeurl.UnmarshalAny(pi.Info)
if err != nil {
return nil, errors.Wrap(err, "unable to decode process details")
}
s, err := summaryFromInterface(i)
if err != nil {
return nil, err
}
infos = append(infos, *s)
}
return infos, nil
}
type restoredProcess struct {
p containerd.Process
}
func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
if p.p == nil {
return 255, time.Now(), nil
}
status, err := p.p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return 255, time.Now(), nil
}
status, err := p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) Delete(ctx context.Context, containerID string) error {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return err
}
labels, err := ctr.Labels(ctx)
if err != nil {
return err
}
bundle := labels[DockerContainerBundlePath]
if err := ctr.Delete(ctx); err != nil {
return wrapError(err)
}
c.oomMu.Lock()
delete(c.oom, containerID)
c.oomMu.Unlock()
c.v2runcoptionsMu.Lock()
delete(c.v2runcoptions, containerID)
c.v2runcoptionsMu.Unlock()
if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
if err := os.RemoveAll(bundle); err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": containerID,
"bundle": bundle,
}).Error("failed to remove state dir")
}
}
return nil
}
func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
t, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return containerd.Unknown, err
}
s, err := t.Status(ctx)
if err != nil {
return containerd.Unknown, wrapError(err)
}
return s.Status, nil
}
func (c *client) getCheckpointOptions(id string, exit bool) containerd.CheckpointTaskOpts {
return func(r *containerd.CheckpointTaskInfo) error {
if r.Options == nil {
c.v2runcoptionsMu.Lock()
_, isV2 := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if isV2 {
r.Options = &v2runcoptions.CheckpointOptions{Exit: exit}
} else {
r.Options = &runctypes.CheckpointOptions{Exit: exit}
}
return nil
}
switch opts := r.Options.(type) {
case *v2runcoptions.CheckpointOptions:
opts.Exit = exit
case *runctypes.CheckpointOptions:
opts.Exit = exit
}
return nil
}
}
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
opts := []containerd.CheckpointTaskOpts{c.getCheckpointOptions(containerID, exit)}
img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
if err != nil {
return wrapError(err)
}
// Whatever happens, delete the checkpoint from containerd
defer func() {
err := c.client.ImageService().Delete(context.Background(), img.Name())
if err != nil {
c.logger.WithError(err).WithField("digest", img.Target().Digest).
Warnf("failed to delete checkpoint image")
}
}()
b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
}
var index v1.Index
if err := json.Unmarshal(b, &index); err != nil {
return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
}
var cpDesc *v1.Descriptor
for _, m := range index.Manifests {
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
cpDesc = &m // nolint:gosec
break
}
}
if cpDesc == nil {
return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
}
rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
}
defer rat.Close()
_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
}
return err
}
func (c *client) getContainer(ctx context.Context, id string) (containerd.Container, error) {
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
}
return nil, wrapError(err)
}
return ctr, nil
}
func (c *client) getProcess(ctx context.Context, containerID, processID string) (containerd.Process, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return nil, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
}
return nil, wrapError(err)
}
if processID == libcontainerdtypes.InitProcessName {
return t, nil
}
p, err := t.LoadProcess(ctx, processID, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
}
return nil, wrapError(err)
}
return p, nil
}
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
var (
io *cio.DirectIO
err error
)
io, err = c.newDirectIO(context.Background(), fifos)
if err != nil {
return nil, err
}
if io.Stdin != nil {
var (
err error
stdinOnce sync.Once
)
pipe := io.Stdin
io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
stdinOnce.Do(func() {
err = pipe.Close()
// Do the rest in a new routine to avoid a deadlock if the
// Exec/Start call failed.
go func() {
<-stdinCloseSync
p, err := c.getProcess(context.Background(), containerID, processID)
if err == nil {
err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
if err != nil && strings.Contains(err.Error(), "transport is closing") {
err = nil
}
}
}()
})
return err
})
}
rio, err := attachStdio(io)
if err != nil {
io.Cancel()
io.Close()
}
return rio, err
}
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
c.eventQ.Append(ei.ContainerID, func() {
err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"event": et,
"event-info": ei,
}).Error("failed to process event")
}
if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
p, err := c.getProcess(ctx, ei.ContainerID, ei.ProcessID)
if err != nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
"error": err,
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Error("exit event")
return
}
ctr, err := c.getContainer(ctx, ei.ContainerID)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to find container")
} else {
labels, err := ctr.Labels(ctx)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to get container labels")
return
}
newFIFOSet(labels[DockerContainerBundlePath], ei.ProcessID, true, false).Close()
}
_, err = p.Delete(context.Background())
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
}
})
}
func (c *client) waitServe(ctx context.Context) bool {
t := 100 * time.Millisecond
delay := time.NewTimer(t)
if !delay.Stop() {
<-delay.C
}
defer delay.Stop()
// `IsServing` will actually block until the service is ready.
// However it can return early, so we'll loop with a delay to handle it.
for {
serving, err := c.client.IsServing(ctx)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return false
}
logrus.WithError(err).Warn("Error while testing if containerd API is ready")
}
if serving {
return true
}
delay.Reset(t)
select {
case <-ctx.Done():
return false
case <-delay.C:
}
}
}
func (c *client) processEventStream(ctx context.Context, ns string) {
var (
err error
ev *events.Envelope
et libcontainerdtypes.EventType
ei libcontainerdtypes.EventInfo
)
// Create a new context specifically for this subscription.
// The context must be cancelled to cancel the subscription.
// In cases where we have to restart event stream processing,
// we'll need the original context b/c this one will be cancelled
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Filter on both namespace *and* topic. To create an "and" filter,
// this must be a single, comma-separated string
eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
c.logger.Debug("processing event stream")
for {
var oomKilled bool
select {
case err = <-errC:
if err != nil {
errStatus, ok := status.FromError(err)
if !ok || errStatus.Code() != codes.Canceled {
c.logger.WithError(err).Error("Failed to get event")
c.logger.Info("Waiting for containerd to be ready to restart event processing")
if c.waitServe(ctx) {
go c.processEventStream(ctx, ns)
return
}
}
c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
}
return
case ev = <-eventStream:
if ev.Event == nil {
c.logger.WithField("event", ev).Warn("invalid event")
continue
}
v, err := typeurl.UnmarshalAny(ev.Event)
if err != nil {
c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
continue
}
c.logger.WithField("topic", ev.Topic).Debug("event")
switch t := v.(type) {
case *apievents.TaskCreate:
et = libcontainerdtypes.EventCreate
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskStart:
et = libcontainerdtypes.EventStart
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskExit:
et = libcontainerdtypes.EventExit
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ID,
Pid: t.Pid,
ExitCode: t.ExitStatus,
ExitedAt: t.ExitedAt,
}
case *apievents.TaskOOM:
et = libcontainerdtypes.EventOOM
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
OOMKilled: true,
}
oomKilled = true
case *apievents.TaskExecAdded:
et = libcontainerdtypes.EventExecAdded
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
}
case *apievents.TaskExecStarted:
et = libcontainerdtypes.EventExecStarted
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
Pid: t.Pid,
}
case *apievents.TaskPaused:
et = libcontainerdtypes.EventPaused
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskResumed:
et = libcontainerdtypes.EventResumed
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskDelete:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t),
"container": t.ContainerID},
).Info("ignoring event")
continue
default:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t)},
).Info("ignoring event")
continue
}
c.oomMu.Lock()
if oomKilled {
c.oom[ei.ContainerID] = true
}
ei.OOMKilled = c.oom[ei.ContainerID]
c.oomMu.Unlock()
c.processEvent(ctx, et, ei)
}
}
}
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
}
defer writer.Close()
size, err := io.Copy(writer, r)
if err != nil {
return nil, err
}
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
return nil, err
}
return &types.Descriptor{
MediaType: mediaType,
Digest: writer.Digest(),
Size_: size,
}, nil
}
func (c *client) bundleDir(id string) string {
return filepath.Join(c.stateDir, id)
}
func wrapError(err error) error {
switch {
case err == nil:
return nil
case containerderrors.IsNotFound(err):
return errdefs.NotFound(err)
}
msg := err.Error()
for _, s := range []string{"container does not exist", "not found", "no such container"} {
if strings.Contains(msg, s) {
return errdefs.NotFound(err)
}
}
return err
}
| package remote // import "github.com/docker/docker/libcontainerd/remote"
import (
"context"
"encoding/json"
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd"
apievents "github.com/containerd/containerd/api/events"
"github.com/containerd/containerd/api/types"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/content"
containerderrors "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/runtime/linux/runctypes"
v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/typeurl"
"github.com/docker/docker/errdefs"
"github.com/docker/docker/libcontainerd/queue"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/ioutils"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"
type client struct {
client *containerd.Client
stateDir string
logger *logrus.Entry
ns string
backend libcontainerdtypes.Backend
eventQ queue.Queue
oomMu sync.Mutex
oom map[string]bool
v2runcoptionsMu sync.Mutex
// v2runcoptions is used for copying options specified on Create() to Start()
v2runcoptions map[string]v2runcoptions.Options
}
// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
c := &client{
client: cli,
stateDir: stateDir,
logger: logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
ns: ns,
backend: b,
oom: make(map[string]bool),
v2runcoptions: make(map[string]v2runcoptions.Options),
}
go c.processEventStream(ctx, ns)
return c, nil
}
func (c *client) Version(ctx context.Context) (containerd.Version, error) {
return c.client.Version(ctx)
}
// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, p libcontainerdtypes.Process, err error) {
var dio *cio.DirectIO
defer func() {
if err != nil && dio != nil {
dio.Cancel()
dio.Close()
}
err = wrapError(err)
}()
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
return false, -1, nil, errors.WithStack(wrapError(err))
}
attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
// dio must be assigned to the previously defined dio for the defer above
// to handle cleanup
dio, err = c.newDirectIO(ctx, fifos)
if err != nil {
return nil, err
}
return attachStdio(dio)
}
t, err := ctr.Task(ctx, attachIO)
if err != nil && !containerderrors.IsNotFound(err) {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
}
if t != nil {
s, err := t.Status(ctx)
if err != nil {
return false, -1, nil, errors.Wrap(wrapError(err), "error getting task status")
}
alive = s.Status != containerd.Stopped
pid = int(t.Pid())
}
c.logger.WithFields(logrus.Fields{
"container": id,
"alive": alive,
"pid": pid,
}).Debug("restored container")
return alive, pid, &restoredProcess{
p: t,
}, nil
}
func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
bdir := c.bundleDir(id)
c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
newOpts := []containerd.NewContainerOpts{
containerd.WithSpec(ociSpec),
containerd.WithRuntime(shim, runtimeOptions),
WithBundle(bdir, ociSpec),
}
opts = append(opts, newOpts...)
_, err := c.client.NewContainer(ctx, id, opts...)
if err != nil {
if containerderrors.IsAlreadyExists(err) {
return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return wrapError(err)
}
if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
c.v2runcoptionsMu.Lock()
c.v2runcoptions[id] = *x
c.v2runcoptionsMu.Unlock()
}
return nil
}
// Start create and start a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, id)
if err != nil {
return -1, err
}
var (
cp *types.Descriptor
t containerd.Task
rio cio.IO
stdinCloseSync = make(chan struct{})
)
if checkpointDir != "" {
// write checkpoint to the content store
tar := archive.Diff(ctx, "", checkpointDir)
cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
// remove the checkpoint when we're done
defer func() {
if cp != nil {
err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"ref": checkpointDir,
"digest": cp.Digest,
}).Warnf("failed to delete temporary checkpoint entry")
}
}
}()
if err := tar.Close(); err != nil {
return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
}
if err != nil {
return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
}
}
spec, err := ctr.Spec(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve spec")
}
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, errors.Wrap(err, "failed to retrieve labels")
}
bundle := labels[DockerContainerBundlePath]
uid, gid := getSpecUser(spec)
taskOpts := []containerd.NewTaskOpts{
func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
info.Checkpoint = cp
return nil
},
}
if runtime.GOOS != "windows" {
taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
c.v2runcoptionsMu.Lock()
opts, ok := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if ok {
opts.IoUid = uint32(uid)
opts.IoGid = uint32(gid)
info.Options = &opts
} else {
info.Options = &runctypes.CreateOptions{
IoUid: uint32(uid),
IoGid: uint32(gid),
NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
}
}
return nil
})
} else {
taskOpts = append(taskOpts, withLogLevel(c.logger.Level))
}
t, err = ctr.NewTask(ctx,
func(id string) (cio.IO, error) {
fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
return rio, err
},
taskOpts...,
)
if err != nil {
close(stdinCloseSync)
if rio != nil {
rio.Cancel()
rio.Close()
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
close(stdinCloseSync)
if err := t.Start(ctx); err != nil {
if _, err := t.Delete(ctx); err != nil {
c.logger.WithError(err).WithField("container", id).
Error("failed to delete task after fail start")
}
return -1, wrapError(err)
}
return int(t.Pid()), nil
}
// Exec creates exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create stdin fifo if needs. But
// for the container main process, the stdin fifo will be created in Create not
// the Start call. stdinCloseSync channel should be closed after Start exec
// process.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return -1, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
}
return -1, wrapError(err)
}
var (
p containerd.Process
rio cio.IO
stdinCloseSync = make(chan struct{})
)
labels, err := ctr.Labels(ctx)
if err != nil {
return -1, wrapError(err)
}
fifos := newFIFOSet(labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)
defer func() {
if err != nil {
if rio != nil {
rio.Cancel()
rio.Close()
}
}
}()
p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
return rio, err
})
if err != nil {
close(stdinCloseSync)
if containerderrors.IsAlreadyExists(err) {
return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
}
return -1, wrapError(err)
}
// Signal c.createIO that it can call CloseIO
//
// the stdin of exec process will be created after p.Start in containerd
defer close(stdinCloseSync)
if err = p.Start(ctx); err != nil {
// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
// older containerd-shim
ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
defer cancel()
p.Delete(ctx)
return -1, wrapError(err)
}
return int(p.Pid()), nil
}
func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}
func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.Resize(ctx, uint32(width), uint32(height))
}
func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
p, err := c.getProcess(ctx, containerID, processID)
if err != nil {
return err
}
return p.CloseIO(ctx, containerd.WithStdinCloser)
}
func (c *client) Pause(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return wrapError(p.(containerd.Task).Pause(ctx))
}
func (c *client) Resume(ctx context.Context, containerID string) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
return p.(containerd.Task).Resume(ctx)
}
func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
m, err := p.(containerd.Task).Metrics(ctx)
if err != nil {
return nil, err
}
v, err := typeurl.UnmarshalAny(m.Data)
if err != nil {
return nil, err
}
return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}
func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var pids []uint32
for _, i := range pis {
pids = append(pids, i.Pid)
}
return pids, nil
}
func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return nil, err
}
pis, err := p.(containerd.Task).Pids(ctx)
if err != nil {
return nil, err
}
var infos []libcontainerdtypes.Summary
for _, pi := range pis {
i, err := typeurl.UnmarshalAny(pi.Info)
if err != nil {
return nil, errors.Wrap(err, "unable to decode process details")
}
s, err := summaryFromInterface(i)
if err != nil {
return nil, err
}
infos = append(infos, *s)
}
return infos, nil
}
type restoredProcess struct {
p containerd.Process
}
func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
if p.p == nil {
return 255, time.Now(), nil
}
status, err := p.p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return 255, time.Now(), nil
}
status, err := p.Delete(ctx)
if err != nil {
return 255, time.Now(), nil
}
return status.ExitCode(), status.ExitTime(), nil
}
func (c *client) Delete(ctx context.Context, containerID string) error {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return err
}
labels, err := ctr.Labels(ctx)
if err != nil {
return err
}
bundle := labels[DockerContainerBundlePath]
if err := ctr.Delete(ctx); err != nil {
return wrapError(err)
}
c.oomMu.Lock()
delete(c.oom, containerID)
c.oomMu.Unlock()
c.v2runcoptionsMu.Lock()
delete(c.v2runcoptions, containerID)
c.v2runcoptionsMu.Unlock()
if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
if err := os.RemoveAll(bundle); err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": containerID,
"bundle": bundle,
}).Error("failed to remove state dir")
}
}
return nil
}
func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
t, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return containerd.Unknown, err
}
s, err := t.Status(ctx)
if err != nil {
return containerd.Unknown, wrapError(err)
}
return s.Status, nil
}
func (c *client) getCheckpointOptions(id string, exit bool) containerd.CheckpointTaskOpts {
return func(r *containerd.CheckpointTaskInfo) error {
if r.Options == nil {
c.v2runcoptionsMu.Lock()
_, isV2 := c.v2runcoptions[id]
c.v2runcoptionsMu.Unlock()
if isV2 {
r.Options = &v2runcoptions.CheckpointOptions{Exit: exit}
} else {
r.Options = &runctypes.CheckpointOptions{Exit: exit}
}
return nil
}
switch opts := r.Options.(type) {
case *v2runcoptions.CheckpointOptions:
opts.Exit = exit
case *runctypes.CheckpointOptions:
opts.Exit = exit
}
return nil
}
}
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
if err != nil {
return err
}
opts := []containerd.CheckpointTaskOpts{c.getCheckpointOptions(containerID, exit)}
img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
if err != nil {
return wrapError(err)
}
// Whatever happens, delete the checkpoint from containerd
defer func() {
err := c.client.ImageService().Delete(context.Background(), img.Name())
if err != nil {
c.logger.WithError(err).WithField("digest", img.Target().Digest).
Warnf("failed to delete checkpoint image")
}
}()
b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
}
var index v1.Index
if err := json.Unmarshal(b, &index); err != nil {
return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
}
var cpDesc *v1.Descriptor
for _, m := range index.Manifests {
m := m
if m.MediaType == images.MediaTypeContainerd1Checkpoint {
cpDesc = &m // nolint:gosec
break
}
}
if cpDesc == nil {
return errdefs.System(errors.Wrapf(err, "invalid checkpoint"))
}
rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
}
defer rat.Close()
_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
if err != nil {
return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
}
return err
}
func (c *client) getContainer(ctx context.Context, id string) (containerd.Container, error) {
ctr, err := c.client.LoadContainer(ctx, id)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
}
return nil, wrapError(err)
}
return ctr, nil
}
func (c *client) getProcess(ctx context.Context, containerID, processID string) (containerd.Process, error) {
ctr, err := c.getContainer(ctx, containerID)
if err != nil {
return nil, err
}
t, err := ctr.Task(ctx, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
}
return nil, wrapError(err)
}
if processID == libcontainerdtypes.InitProcessName {
return t, nil
}
p, err := t.LoadProcess(ctx, processID, nil)
if err != nil {
if containerderrors.IsNotFound(err) {
return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
}
return nil, wrapError(err)
}
return p, nil
}
// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
var (
io *cio.DirectIO
err error
)
io, err = c.newDirectIO(context.Background(), fifos)
if err != nil {
return nil, err
}
if io.Stdin != nil {
var (
err error
stdinOnce sync.Once
)
pipe := io.Stdin
io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
stdinOnce.Do(func() {
err = pipe.Close()
// Do the rest in a new routine to avoid a deadlock if the
// Exec/Start call failed.
go func() {
<-stdinCloseSync
p, err := c.getProcess(context.Background(), containerID, processID)
if err == nil {
err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
if err != nil && strings.Contains(err.Error(), "transport is closing") {
err = nil
}
}
}()
})
return err
})
}
rio, err := attachStdio(io)
if err != nil {
io.Cancel()
io.Close()
}
return rio, err
}
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
c.eventQ.Append(ei.ContainerID, func() {
err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"event": et,
"event-info": ei,
}).Error("failed to process event")
}
if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
p, err := c.getProcess(ctx, ei.ContainerID, ei.ProcessID)
if err != nil {
c.logger.WithError(errors.New("no such process")).
WithFields(logrus.Fields{
"error": err,
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Error("exit event")
return
}
ctr, err := c.getContainer(ctx, ei.ContainerID)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to find container")
} else {
labels, err := ctr.Labels(ctx)
if err != nil {
c.logger.WithFields(logrus.Fields{
"container": ei.ContainerID,
"error": err,
}).Error("failed to get container labels")
return
}
newFIFOSet(labels[DockerContainerBundlePath], ei.ProcessID, true, false).Close()
}
_, err = p.Delete(context.Background())
if err != nil {
c.logger.WithError(err).WithFields(logrus.Fields{
"container": ei.ContainerID,
"process": ei.ProcessID,
}).Warn("failed to delete process")
}
}
})
}
func (c *client) waitServe(ctx context.Context) bool {
t := 100 * time.Millisecond
delay := time.NewTimer(t)
if !delay.Stop() {
<-delay.C
}
defer delay.Stop()
// `IsServing` will actually block until the service is ready.
// However it can return early, so we'll loop with a delay to handle it.
for {
serving, err := c.client.IsServing(ctx)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
return false
}
logrus.WithError(err).Warn("Error while testing if containerd API is ready")
}
if serving {
return true
}
delay.Reset(t)
select {
case <-ctx.Done():
return false
case <-delay.C:
}
}
}
func (c *client) processEventStream(ctx context.Context, ns string) {
var (
err error
ev *events.Envelope
et libcontainerdtypes.EventType
ei libcontainerdtypes.EventInfo
)
// Create a new context specifically for this subscription.
// The context must be cancelled to cancel the subscription.
// In cases where we have to restart event stream processing,
// we'll need the original context b/c this one will be cancelled
subCtx, cancel := context.WithCancel(ctx)
defer cancel()
// Filter on both namespace *and* topic. To create an "and" filter,
// this must be a single, comma-separated string
eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
c.logger.Debug("processing event stream")
for {
var oomKilled bool
select {
case err = <-errC:
if err != nil {
errStatus, ok := status.FromError(err)
if !ok || errStatus.Code() != codes.Canceled {
c.logger.WithError(err).Error("Failed to get event")
c.logger.Info("Waiting for containerd to be ready to restart event processing")
if c.waitServe(ctx) {
go c.processEventStream(ctx, ns)
return
}
}
c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
}
return
case ev = <-eventStream:
if ev.Event == nil {
c.logger.WithField("event", ev).Warn("invalid event")
continue
}
v, err := typeurl.UnmarshalAny(ev.Event)
if err != nil {
c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
continue
}
c.logger.WithField("topic", ev.Topic).Debug("event")
switch t := v.(type) {
case *apievents.TaskCreate:
et = libcontainerdtypes.EventCreate
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskStart:
et = libcontainerdtypes.EventStart
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ContainerID,
Pid: t.Pid,
}
case *apievents.TaskExit:
et = libcontainerdtypes.EventExit
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ID,
Pid: t.Pid,
ExitCode: t.ExitStatus,
ExitedAt: t.ExitedAt,
}
case *apievents.TaskOOM:
et = libcontainerdtypes.EventOOM
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
OOMKilled: true,
}
oomKilled = true
case *apievents.TaskExecAdded:
et = libcontainerdtypes.EventExecAdded
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
}
case *apievents.TaskExecStarted:
et = libcontainerdtypes.EventExecStarted
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
ProcessID: t.ExecID,
Pid: t.Pid,
}
case *apievents.TaskPaused:
et = libcontainerdtypes.EventPaused
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskResumed:
et = libcontainerdtypes.EventResumed
ei = libcontainerdtypes.EventInfo{
ContainerID: t.ContainerID,
}
case *apievents.TaskDelete:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t),
"container": t.ContainerID},
).Info("ignoring event")
continue
default:
c.logger.WithFields(logrus.Fields{
"topic": ev.Topic,
"type": reflect.TypeOf(t)},
).Info("ignoring event")
continue
}
c.oomMu.Lock()
if oomKilled {
c.oom[ei.ContainerID] = true
}
ei.OOMKilled = c.oom[ei.ContainerID]
c.oomMu.Unlock()
c.processEvent(ctx, et, ei)
}
}
}
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
if err != nil {
return nil, err
}
defer writer.Close()
size, err := io.Copy(writer, r)
if err != nil {
return nil, err
}
labels := map[string]string{
"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
}
if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
return nil, err
}
return &types.Descriptor{
MediaType: mediaType,
Digest: writer.Digest(),
Size_: size,
}, nil
}
func (c *client) bundleDir(id string) string {
return filepath.Join(c.stateDir, id)
}
func wrapError(err error) error {
switch {
case err == nil:
return nil
case containerderrors.IsNotFound(err):
return errdefs.NotFound(err)
}
msg := err.Error()
for _, s := range []string{"container does not exist", "not found", "no such container"} {
if strings.Contains(msg, s) {
return errdefs.NotFound(err)
}
}
return err
}
| thaJeztah | 5e62ca1a05f2aab49066d91c9070cb7dc7ad2461 | 2773f81aa5e9e34733675a7aa7e4219391caccb0 | Agreed. Don't know why I didn't do so 👍 let me update | thaJeztah | 4,753 |
moby/moby | 42,432 | Fix possible overlapping IPs | **- What I did**
Fix a possibility where overlapping IP addresses could exist as a result of the node failing to clean up its old loadbalancer IPs.
Fixes #40989, supersedes #41062.
closes #41062
**- How I did it**
A node is no longer using its load balancer IP address when it no longer has tasks that use the network that requires that load balancer. When this occurs, the swarmkit manager will free that IP in IPAM, and may reassign it.
When a task shuts down cleanly, it attempts removal of the networks it uses, and if it is the last task using those networks, this removal will succeed:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L378-L384
However, this behavior is absent if the container _fails_. Removal of the network is never attempted.
To address this issue, I amend the executor. Whenever a node load balancer IP is removed or changed, that information is passed to the executor by way of the Configure method. By keeping track of the set of node NetworkAttachments from the previous call to Configure, we can determine which, if any, have been removed or changed.
At first, this seems to create a race, by which a task can be attempting to start and the network removed right out from under it. However, closer inspection of the code shows that this is already addressed in the controller:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L207-L225
The controller will attempt again to create the network as long as the network is missing.
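As a minimal, self-contained illustration of the bookkeeping described above (a sketch only — the function and variable names are illustrative, and the real logic lives in `executor.Configure`, shown in the file contents further down):

```go
// Sketch of the attachment-diffing idea; not the actual patch.
package main

import "fmt"

// diffAttachments compares the node's network attachments from the previous
// Configure call with the current ones. The result is keyed by network ID;
// true means the attachment is gone entirely, false means only its
// load-balancer IP changed. In both cases the stale local network should be
// removed so the manager can safely reuse the address.
func diffAttachments(previous, current map[string]string) map[string]bool {
	stale := make(map[string]bool)
	for networkID, oldIP := range previous {
		newIP, stillAttached := current[networkID]
		if !stillAttached || newIP != oldIP {
			stale[networkID] = !stillAttached
		}
	}
	return stale
}

func main() {
	previous := map[string]string{"net-a": "10.0.0.2", "net-b": "10.0.1.2"}
	current := map[string]string{"net-a": "10.0.0.5"} // net-b gone, net-a IP changed
	fmt.Println(diffAttachments(previous, current))   // map[net-a:false net-b:true]
}
```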
**- How to verify it**
I'm not certain how to write a sane test for this. I've mostly verified it by logic.
**- Description for the changelog**
Fixed a bug where swarm could have duplicate IPs when the last task on a network on a node failed.
| null | 2021-05-27 18:21:43+00:00 | 2021-06-18 16:07:27+00:00 | daemon/cluster/executor/container/executor.go | package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/template"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/libnetwork"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/template"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
// nodeObj holds a copy of the swarmkit Node object from the time of the
// last call to executor.Configure. This allows us to discover which
// network attachments the node previously had, which further allows us to
// determine which, if any, need to be removed. nodeObj is not protected by
// a mutex, because it is only written to in the method (Configure) that it
// is read from. If that changes, it may need to be guarded.
nodeObj *api.Node
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
// discover which, if any, attachments have been removed.
//
// we aren't responsible directly for creating these networks. that is
// handled indirectly when a container using that network is created.
// however, when it comes time to remove the network, none of the relevant
// tasks may exist anymore. this means we should go ahead and try to remove
// any network we know to no longer be in use.
// removeAttachments maps the network ID to a boolean. This boolean
// indicates whether the attachment in question is totally removed (true),
// or has just had its IP changed (false)
removeAttachments := make(map[string]bool)
	// the first time we Configure, nodeObj will be nil, because it will not be
// set yet. in that case, skip this check.
if e.nodeObj != nil {
for _, na := range e.nodeObj.Attachments {
// same thing as above, check sanity of the attachments so we don't
// get a panic.
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
// now, check if the attachment exists and shares the same IP address.
if ip, ok := attachments[na.Network.ID]; !ok || na.Addresses[0] != ip {
// if the map entry exists, then the network still exists, and the
// IP must be what has changed
removeAttachments[na.Network.ID] = !ok
}
}
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
var (
activeEndpointsError *libnetwork.ActiveEndpointsError
errNoSuchNetwork libnetwork.ErrNoSuchNetwork
)
// now, finally, remove any network LB attachments that we no longer have.
for nw, gone := range removeAttachments {
err := e.backend.DeleteManagedNetwork(nw)
switch {
case err == nil:
continue
case errors.As(err, &activeEndpointsError):
// this is the purpose of the boolean in the map. it's literally
// just to log an appropriate, informative error. i'm unsure if
// this can ever actually occur, but we need to know if it does.
if gone {
log.G(ctx).Warnf("network %s should be removed, but still has active attachments", nw)
} else {
log.G(ctx).Warnf(
"network %s should have its node LB IP changed, but cannot be removed because of active attachments",
nw,
)
}
continue
case errors.As(err, &errNoSuchNetwork):
// NoSuchNetworkError indicates the network is already gone.
continue
default:
log.G(ctx).Errorf("network %s remove failed: %v", nw, err)
}
}
// now update our copy of the node object, reset the attachment store, and
// return
e.nodeObj = node
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| dperny | f32fc350ce4f0373efa5fc646dc24ebc226bf168 | 7729ebfa1b292e74c5d65d3130cde1f8b302a9f2 | Do we need to check if `e.nodeObj` is not `nil` here? (I guess it would panic in that case) | thaJeztah | 4,754 |
moby/moby | 42,432 | Fix possible overlapping IPs | **- What I did**
Fix a possibility where overlapping IP addresses could exist as a result of the node failing to clean up its old loadbalancer IPs.
Fixes #40989, supersedes #41062.
closes #41062
**- How I did it**
A node is no longer using its load balancer IP address when it no longer has tasks that use the network that requires that load balancer. When this occurs, the swarmkit manager will free that IP in IPAM, and may reassign it.
When a task shuts down cleanly, it attempts removal of the networks it uses, and if it is the last task using those networks, this removal will succeed:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L378-L384
However, this behavior is absent if the container _fails_. Removal of the network is never attempted.
To address this issue, I amend the executor. Whenever a node load balancer IP is removed or changed, that information is passed to the executor by way of the Configure method. By keeping track of the set of node NetworkAttachments from the previous call to Configure, we can determine which, if any, have been removed or changed.
At first, this seems to create a race, by which a task can be attempting to start and the network removed right out from under it. However, closer inspection of the code shows that this is already addressed in the controller:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L207-L225
The controller will attempt again to create the network as long as the network is missing.
**- How to verify it**
I'm not certain how to write a sane test for this. I've mostly verified it by logic.
**- Description for the changelog**
Fixed a bug where swarm could have duplicate IPs when the last task on a network on a node failed.
| null | 2021-05-27 18:21:43+00:00 | 2021-06-18 16:07:27+00:00 | daemon/cluster/executor/container/executor.go | package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/template"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/libnetwork"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/template"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
// nodeObj holds a copy of the swarmkit Node object from the time of the
// last call to executor.Configure. This allows us to discover which
// network attachments the node previously had, which further allows us to
// determine which, if any, need to be removed. nodeObj is not protected by
// a mutex, because it is only written to in the method (Configure) that it
// is read from. If that changes, it may need to be guarded.
nodeObj *api.Node
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
// discover which, if any, attachments have been removed.
//
// we aren't responsible directly for creating these networks. that is
// handled indirectly when a container using that network is created.
// however, when it comes time to remove the network, none of the relevant
// tasks may exist anymore. this means we should go ahead and try to remove
// any network we know to no longer be in use.
// removeAttachments maps the network ID to a boolean. This boolean
// indicates whether the attachment in question is totally removed (true),
// or has just had its IP changed (false)
removeAttachments := make(map[string]bool)
	// the first time we Configure, nodeObj will be nil, because it will not be
// set yet. in that case, skip this check.
if e.nodeObj != nil {
for _, na := range e.nodeObj.Attachments {
// same thing as above, check sanity of the attachments so we don't
// get a panic.
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
// now, check if the attachment exists and shares the same IP address.
if ip, ok := attachments[na.Network.ID]; !ok || na.Addresses[0] != ip {
// if the map entry exists, then the network still exists, and the
// IP must be what has changed
removeAttachments[na.Network.ID] = !ok
}
}
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
var (
activeEndpointsError *libnetwork.ActiveEndpointsError
errNoSuchNetwork libnetwork.ErrNoSuchNetwork
)
// now, finally, remove any network LB attachments that we no longer have.
for nw, gone := range removeAttachments {
err := e.backend.DeleteManagedNetwork(nw)
switch {
case err == nil:
continue
case errors.As(err, &activeEndpointsError):
// this is the purpose of the boolean in the map. it's literally
// just to log an appropriate, informative error. i'm unsure if
// this can ever actually occur, but we need to know if it does.
if gone {
log.G(ctx).Warnf("network %s should be removed, but still has active attachments", nw)
} else {
log.G(ctx).Warnf(
"network %s should have its node LB IP changed, but cannot be removed because of active attachments",
nw,
)
}
continue
case errors.As(err, &errNoSuchNetwork):
// NoSuchNetworkError indicates the network is already gone.
continue
default:
log.G(ctx).Errorf("network %s remove failed: %v", nw, err)
}
}
// now update our copy of the node object, reset the attachment store, and
// return
e.nodeObj = node
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| dperny | f32fc350ce4f0373efa5fc646dc24ebc226bf168 | 7729ebfa1b292e74c5d65d3130cde1f8b302a9f2 | Also wondering if there's concurrency here (if a `(RW)Mutex` is needed for accessing `e.nodeObj` (no idea, but putting it here as a comment 😅 | thaJeztah | 4,755 |
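As a minimal sketch of what the reviewer's concurrency suggestion could look like if `Configure` were ever called from more than one goroutine — purely illustrative, with placeholder types; the merged change does not add a lock and instead documents that `nodeObj` is only read and written from `Configure`:

```go
// Hypothetical sketch only: guarding the executor's stored node object with a
// mutex. Names and types are stand-ins, not the actual moby code.
package main

import (
	"fmt"
	"sync"
)

// node stands in for swarmkit's *api.Node for the purpose of this sketch.
type node struct{ ID string }

type executor struct {
	mu      sync.Mutex // would protect nodeObj
	nodeObj *node
}

// swapNodeObj atomically replaces the stored node object and returns the
// previous one, so the caller can diff old vs. new attachments safely.
func (e *executor) swapNodeObj(n *node) (previous *node) {
	e.mu.Lock()
	defer e.mu.Unlock()
	previous, e.nodeObj = e.nodeObj, n
	return previous
}

func main() {
	e := &executor{}
	fmt.Println(e.swapNodeObj(&node{ID: "node-1"})) // <nil> on the first call
	fmt.Println(e.swapNodeObj(&node{ID: "node-1"})) // &{node-1} afterwards
}
```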
moby/moby | 42,432 | Fix possible overlapping IPs | **- What I did**
Fix a possibility where overlapping IP addresses could exist as a result of the node failing to clean up its old loadbalancer IPs.
Fixes #40989, supersedes #41062.
closes #41062
**- How I did it**
A node is no longer using its load balancer IP address when it no longer has tasks that use the network that requires that load balancer. When this occurs, the swarmkit manager will free that IP in IPAM, and may reassign it.
When a task shuts down cleanly, it attempts removal of the networks it uses, and if it is the last task using those networks, this removal will succeed:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L378-L384
However, this behavior is absent if the container _fails_. Removal of the network is never attempted.
To address this issue, I amend the executor. Whenever a node load balancer IP is removed or changed, that information is passed to the executor by way of the Configure method. By keeping track of the set of node NetworkAttachments from the previous call to Configure, we can determine which, if any, have been removed or changed.
At first, this seems to create a race, by which a task can be attempting to start and the network removed right out from under it. However, closer inspection of the code shows that this is already addressed in the controller:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L207-L225
The controller will attempt again to create the network as long as the network is missing.
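A generic sketch of that retry behaviour, under stated assumptions — `ensureNetwork`, `createFn`, and `recreateFn` are made-up names, and the actual loop is in the linked controller code:

```go
// Illustrative only: retry container/network setup while the failure indicates
// the network is missing, recreating it before each new attempt.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errNoSuchNetwork = errors.New("no such network")

// ensureNetwork retries createFn whenever the failure indicates the network is
// missing, recreating it via recreateFn before trying again.
func ensureNetwork(ctx context.Context, createFn, recreateFn func() error) error {
	for {
		err := createFn()
		if err == nil || !errors.Is(err, errNoSuchNetwork) {
			return err
		}
		if err := recreateFn(); err != nil {
			return err
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(100 * time.Millisecond):
		}
	}
}

func main() {
	attempts := 0
	create := func() error {
		attempts++
		if attempts < 2 {
			return errNoSuchNetwork // first attempt races with network removal
		}
		return nil
	}
	recreate := func() error { return nil }
	err := ensureNetwork(context.Background(), create, recreate)
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 2 attempts
}
```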
**- How to verify it**
I'm not certain how to write a sane test for this. I've mostly verified it by logic.
**- Description for the changelog**
Fixed a bug where swarm could have duplicate IPs when the last task on a network on a node failed.
| null | 2021-05-27 18:21:43+00:00 | 2021-06-18 16:07:27+00:00 | daemon/cluster/executor/container/executor.go | package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/template"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/libnetwork"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/template"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
// nodeObj holds a copy of the swarmkit Node object from the time of the
// last call to executor.Configure. This allows us to discover which
// network attachments the node previously had, which further allows us to
// determine which, if any, need to be removed. nodeObj is not protected by
// a mutex, because it is only written to in the method (Configure) that it
// is read from. If that changes, it may need to be guarded.
nodeObj *api.Node
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
// discover which, if any, attachments have been removed.
//
// we aren't responsible directly for creating these networks. that is
// handled indirectly when a container using that network is created.
// however, when it comes time to remove the network, none of the relevant
// tasks may exist anymore. this means we should go ahead and try to remove
// any network we know to no longer be in use.
// removeAttachments maps the network ID to a boolean. This boolean
// indicates whether the attachment in question is totally removed (true),
// or has just had its IP changed (false)
removeAttachments := make(map[string]bool)
	// the first time we Configure, nodeObj will be nil, because it will not be
// set yet. in that case, skip this check.
if e.nodeObj != nil {
for _, na := range e.nodeObj.Attachments {
// same thing as above, check sanity of the attachments so we don't
// get a panic.
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
// now, check if the attachment exists and shares the same IP address.
if ip, ok := attachments[na.Network.ID]; !ok || na.Addresses[0] != ip {
// if the map entry exists, then the network still exists, and the
// IP must be what has changed
removeAttachments[na.Network.ID] = !ok
}
}
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
var (
activeEndpointsError *libnetwork.ActiveEndpointsError
errNoSuchNetwork libnetwork.ErrNoSuchNetwork
)
// now, finally, remove any network LB attachments that we no longer have.
for nw, gone := range removeAttachments {
err := e.backend.DeleteManagedNetwork(nw)
switch {
case err == nil:
continue
case errors.As(err, &activeEndpointsError):
// this is the purpose of the boolean in the map. it's literally
// just to log an appropriate, informative error. i'm unsure if
// this can ever actually occur, but we need to know if it does.
if gone {
log.G(ctx).Warnf("network %s should be removed, but still has active attachments", nw)
} else {
log.G(ctx).Warnf(
"network %s should have its node LB IP changed, but cannot be removed because of active attachments",
nw,
)
}
continue
case errors.As(err, &errNoSuchNetwork):
// NoSuchNetworkError indicates the network is already gone.
continue
default:
log.G(ctx).Errorf("network %s remove failed: %v", nw, err)
}
}
// now update our copy of the node object, reset the attachment store, and
// return
e.nodeObj = node
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| dperny | f32fc350ce4f0373efa5fc646dc24ebc226bf168 | 7729ebfa1b292e74c5d65d3130cde1f8b302a9f2 | Good catch on the nil-check. `e.nodeObj` will be nil on the first call to `Configure`. Incredibly embarrassed to have missed that.
I've added a comment explaining why `nodeObj` does not need to be guarded by a mutex. | dperny | 4,756 |
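The reasoning behind that comment is the usual single-writer rule: a field that is only ever read and written from inside `Configure` needs no lock, while `node`, which `Controller` also reads from other goroutines, stays behind the mutex. A minimal, illustrative Go sketch of the pattern — not moby code, and it assumes (as the added comment implies) that `Configure` is never invoked concurrently with itself:

```go
package main

import "sync"

type executor struct {
	mu   sync.Mutex
	node string // also read by Controller on other goroutines -> guarded by mu

	nodeObj string // read and written only inside Configure -> deliberately unguarded
}

func (e *executor) Configure(cur string) {
	prev := e.nodeObj // safe without a lock: Configure is the sole reader and writer
	_ = prev          // (a real implementation would diff prev against cur here)
	e.nodeObj = cur

	e.mu.Lock() // node, by contrast, needs the mutex
	e.node = cur
	e.mu.Unlock()
}

func main() {
	e := &executor{}
	e.Configure("node-1")
}
```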
moby/moby | 42,432 | Fix possible overlapping IPs | **- What I did**
Fix a possibility where overlapping IP addresses could exist as a result of the node failing to clean up its old loadbalancer IPs.
Fixes #40989, supersedes #41062.
closes #41062
**- How I did it**
A node is no longer using its load balancer IP address when it no longer has tasks that use the network that requires that load balancer. When this occurs, the swarmkit manager will free that IP in IPAM, and may reassign it.
When a task shuts down cleanly, it attempts removal of the networks it uses, and if it is the last task using those networks, this removal will succeed:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L378-L384
However, this behavior is absent if the container _fails_. Removal of the network is never attempted.
To address this issue, I amend the executor. Whenever a node load balancer IP is removed or changed, that information is passed to the executor by way of the Configure method. By keeping track of the set of node NetworkAttachments from the previous call to Configure, we can determine which, if any, have been removed or changed.
At first, this seems to create a race, by which a task can be attempting to start and the network removed right out from under it. However, closer inspection of the code shows that this is already addressed in the controller:
https://github.com/dperny/docker/blob/abe0a10c6839ce301bc5a0e43bde4633af01f50d/daemon/cluster/executor/container/controller.go#L207-L225
The controller will attempt again to create the network as long as the network is missing.
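Condensed to its core, the diffing step described above amounts to comparing the previous attachment map against the current one. The following is only an illustrative sketch — the `prev`/`curr` names and the standalone function are invented here; the real logic lives inline in `Configure`, with its logging and nil-checks:

```go
package main

import "fmt"

// staleAttachments compares two "network ID -> node LB IP" maps and reports
// which networks should have their load balancer cleaned up: true means the
// attachment is gone entirely, false means only its IP changed.
func staleAttachments(prev, curr map[string]string) map[string]bool {
	stale := make(map[string]bool)
	for id, oldIP := range prev {
		if newIP, ok := curr[id]; !ok || newIP != oldIP {
			stale[id] = !ok
		}
	}
	return stale
}

func main() {
	prev := map[string]string{"net1": "10.0.0.2", "net2": "10.0.1.2"}
	curr := map[string]string{"net1": "10.0.0.5"}
	fmt.Println(staleAttachments(prev, curr)) // map[net1:false net2:true]
}
```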
**- How to verify it**
I'm not certain how to write a sane test for this. I've mostly verified it by logic.
**- Description for the changelog**
Fixed a bug where swarm could have duplicate IPs when the last task on a network on a node failed.
| null | 2021-05-27 18:21:43+00:00 | 2021-06-18 16:07:27+00:00 | daemon/cluster/executor/container/executor.go | package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/template"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| package container // import "github.com/docker/docker/daemon/cluster/executor/container"
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
swarmtypes "github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/daemon/cluster/controllers/plugin"
"github.com/docker/docker/daemon/cluster/convert"
executorpkg "github.com/docker/docker/daemon/cluster/executor"
clustertypes "github.com/docker/docker/daemon/cluster/provider"
"github.com/docker/docker/libnetwork"
networktypes "github.com/docker/docker/libnetwork/types"
"github.com/docker/swarmkit/agent"
"github.com/docker/swarmkit/agent/exec"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/api/naming"
"github.com/docker/swarmkit/log"
"github.com/docker/swarmkit/template"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type executor struct {
backend executorpkg.Backend
imageBackend executorpkg.ImageBackend
pluginBackend plugin.Backend
volumeBackend executorpkg.VolumeBackend
dependencies exec.DependencyManager
mutex sync.Mutex // This mutex protects the following node field
node *api.NodeDescription
// nodeObj holds a copy of the swarmkit Node object from the time of the
// last call to executor.Configure. This allows us to discover which
// network attachments the node previously had, which further allows us to
// determine which, if any, need to be removed. nodeObj is not protected by
// a mutex, because it is only written to in the method (Configure) that it
// is read from. If that changes, it may need to be guarded.
nodeObj *api.Node
}
// NewExecutor returns an executor from the docker client.
func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor {
return &executor{
backend: b,
pluginBackend: p,
imageBackend: i,
volumeBackend: v,
dependencies: agent.NewDependencyManager(),
}
}
// Describe returns the underlying node description from the docker client.
func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) {
info := e.backend.SystemInfo()
plugins := map[api.PluginDescription]struct{}{}
addPlugins := func(typ string, names []string) {
for _, name := range names {
plugins[api.PluginDescription{
Type: typ,
Name: name,
}] = struct{}{}
}
}
// add v1 plugins
addPlugins("Volume", info.Plugins.Volume)
// Add builtin driver "overlay" (the only builtin multi-host driver) to
// the plugin list by default.
addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...))
addPlugins("Authorization", info.Plugins.Authorization)
addPlugins("Log", info.Plugins.Log)
// add v2 plugins
v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs())
if err == nil {
for _, plgn := range v2Plugins {
for _, typ := range plgn.Config.Interface.Types {
if typ.Prefix != "docker" || !plgn.Enabled {
continue
}
plgnTyp := typ.Capability
switch typ.Capability {
case "volumedriver":
plgnTyp = "Volume"
case "networkdriver":
plgnTyp = "Network"
case "logdriver":
plgnTyp = "Log"
}
plugins[api.PluginDescription{
Type: plgnTyp,
Name: plgn.Name,
}] = struct{}{}
}
}
}
pluginFields := make([]api.PluginDescription, 0, len(plugins))
for k := range plugins {
pluginFields = append(pluginFields, k)
}
sort.Sort(sortedPlugins(pluginFields))
// parse []string labels into a map[string]string
labels := map[string]string{}
for _, l := range info.Labels {
stringSlice := strings.SplitN(l, "=", 2)
// this will take the last value in the list for a given key
// ideally, one shouldn't assign multiple values to the same key
if len(stringSlice) > 1 {
labels[stringSlice[0]] = stringSlice[1]
}
}
description := &api.NodeDescription{
Hostname: info.Name,
Platform: &api.Platform{
Architecture: info.Architecture,
OS: info.OSType,
},
Engine: &api.EngineDescription{
EngineVersion: info.ServerVersion,
Labels: labels,
Plugins: pluginFields,
},
Resources: &api.Resources{
NanoCPUs: int64(info.NCPU) * 1e9,
MemoryBytes: info.MemTotal,
Generic: convert.GenericResourcesToGRPC(info.GenericResources),
},
}
// Save the node information in the executor field
e.mutex.Lock()
e.node = description
e.mutex.Unlock()
return description, nil
}
func (e *executor) Configure(ctx context.Context, node *api.Node) error {
var ingressNA *api.NetworkAttachment
attachments := make(map[string]string)
for _, na := range node.Attachments {
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
// this should not happen, but we got a panic here and don't have a
// good idea about what the underlying data structure looks like.
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
if na.Network.Spec.Ingress {
ingressNA = na
}
attachments[na.Network.ID] = na.Addresses[0]
}
// discover which, if any, attachments have been removed.
//
// we aren't responsible directly for creating these networks. that is
// handled indirectly when a container using that network is created.
// however, when it comes time to remove the network, none of the relevant
// tasks may exist anymore. this means we should go ahead and try to remove
// any network we know to no longer be in use.
// removeAttachments maps the network ID to a boolean. This boolean
// indicates whether the attachment in question is totally removed (true),
// or has just had its IP changed (false)
removeAttachments := make(map[string]bool)
	// the first time we Configure, nodeObj will be nil, because it will not be
// set yet. in that case, skip this check.
if e.nodeObj != nil {
for _, na := range e.nodeObj.Attachments {
// same thing as above, check sanity of the attachments so we don't
// get a panic.
if na == nil || na.Network == nil || len(na.Addresses) == 0 {
logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)).
Warnf("skipping nil or malformed node network attachment entry")
continue
}
// now, check if the attachment exists and shares the same IP address.
if ip, ok := attachments[na.Network.ID]; !ok || na.Addresses[0] != ip {
// if the map entry exists, then the network still exists, and the
// IP must be what has changed
removeAttachments[na.Network.ID] = !ok
}
}
}
if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) {
ingressNA = node.Attachment
attachments[ingressNA.Network.ID] = ingressNA.Addresses[0]
}
if ingressNA == nil {
e.backend.ReleaseIngress()
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
options := types.NetworkCreate{
Driver: ingressNA.Network.DriverState.Name,
IPAM: &network.IPAM{
Driver: ingressNA.Network.IPAM.Driver.Name,
},
Options: ingressNA.Network.DriverState.Options,
Ingress: true,
CheckDuplicate: true,
}
for _, ic := range ingressNA.Network.IPAM.Configs {
c := network.IPAMConfig{
Subnet: ic.Subnet,
IPRange: ic.Range,
Gateway: ic.Gateway,
}
options.IPAM.Config = append(options.IPAM.Config, c)
}
_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
ID: ingressNA.Network.ID,
NetworkCreateRequest: types.NetworkCreateRequest{
Name: ingressNA.Network.Spec.Annotations.Name,
NetworkCreate: options,
},
}, ingressNA.Addresses[0])
if err != nil {
return err
}
var (
activeEndpointsError *libnetwork.ActiveEndpointsError
errNoSuchNetwork libnetwork.ErrNoSuchNetwork
)
// now, finally, remove any network LB attachments that we no longer have.
for nw, gone := range removeAttachments {
err := e.backend.DeleteManagedNetwork(nw)
switch {
case err == nil:
continue
case errors.As(err, &activeEndpointsError):
// this is the purpose of the boolean in the map. it's literally
// just to log an appropriate, informative error. i'm unsure if
// this can ever actually occur, but we need to know if it does.
if gone {
log.G(ctx).Warnf("network %s should be removed, but still has active attachments", nw)
} else {
log.G(ctx).Warnf(
"network %s should have its node LB IP changed, but cannot be removed because of active attachments",
nw,
)
}
continue
case errors.As(err, &errNoSuchNetwork):
// NoSuchNetworkError indicates the network is already gone.
continue
default:
log.G(ctx).Errorf("network %s remove failed: %v", nw, err)
}
}
// now update our copy of the node object, reset the attachment store, and
// return
e.nodeObj = node
return e.backend.GetAttachmentStore().ResetAttachments(attachments)
}
// Controller returns a docker container runner.
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil)
// Get the node description from the executor field
e.mutex.Lock()
nodeDescription := e.node
e.mutex.Unlock()
if t.Spec.GetAttachment() != nil {
return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
}
var ctlr exec.Controller
switch r := t.Spec.GetRuntime().(type) {
case *api.TaskSpec_Generic:
logrus.WithFields(logrus.Fields{
"kind": r.Generic.Kind,
"type_url": r.Generic.Payload.TypeUrl,
}).Debug("custom runtime requested")
runtimeKind, err := naming.Runtime(t.Spec)
if err != nil {
return ctlr, err
}
switch runtimeKind {
case string(swarmtypes.RuntimePlugin):
if !e.backend.HasExperimental() {
return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin)
}
c, err := plugin.NewController(e.pluginBackend, t)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
}
case *api.TaskSpec_Container:
c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter)
if err != nil {
return ctlr, err
}
ctlr = c
default:
return ctlr, fmt.Errorf("unsupported runtime: %q", r)
}
return ctlr, nil
}
func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error {
nwKeys := []*networktypes.EncryptionKey{}
for _, key := range keys {
nwKey := &networktypes.EncryptionKey{
Subsystem: key.Subsystem,
Algorithm: int32(key.Algorithm),
Key: make([]byte, len(key.Key)),
LamportTime: key.LamportTime,
}
copy(nwKey.Key, key.Key)
nwKeys = append(nwKeys, nwKey)
}
e.backend.SetNetworkBootstrapKeys(nwKeys)
return nil
}
func (e *executor) Secrets() exec.SecretsManager {
return e.dependencies.Secrets()
}
func (e *executor) Configs() exec.ConfigsManager {
return e.dependencies.Configs()
}
type sortedPlugins []api.PluginDescription
func (sp sortedPlugins) Len() int { return len(sp) }
func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] }
func (sp sortedPlugins) Less(i, j int) bool {
if sp[i].Type != sp[j].Type {
return sp[i].Type < sp[j].Type
}
return sp[i].Name < sp[j].Name
}
| dperny | f32fc350ce4f0373efa5fc646dc24ebc226bf168 | 7729ebfa1b292e74c5d65d3130cde1f8b302a9f2 | ```suggestion
"github.com/docker/docker/libnetwork"
``` | thaJeztah | 4,757 |
moby/moby | 42,424 | vendor: github.com/moby/buildkit v0.8.3-3-g244e8cde | ### vendor: github.com/moby/buildkit v0.8.3-3-g244e8cde
full diff: https://github.com/moby/buildkit/compare/v0.8.3...v0.8.3-3-g244e8cde
- Transform relative mountpoints for exec mounts in the executor
- Add test for handling relative mountpoints
### vendor: github.com/moby/buildkit v0.8.3
full diff: https://github.com/moby/buildkit/compare/v0.8.2...v0.8.3
- vendor containerd (required for rootless overlayfs on kernel 5.11)
  - not included to avoid depending on a fork
- Add retry on image push 5xx errors
- contenthash: include basename in content checksum for wildcards
- Fix missing mounts in execOp cache map
- Add regression test for run cache not considering mounts
- Add hack to preserve Dockerfile RUN cache compatibility after mount cache bugfix
**- Description for the changelog**
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-05-26 11:34:16+00:00 | 2021-06-01 17:49:42+00:00 | vendor/github.com/moby/buildkit/go.mod | module github.com/moby/buildkit
go 1.13
require (
github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.10
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
github.com/containerd/console v1.0.1
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
github.com/containerd/go-cni v1.0.1
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
github.com/containerd/typeurl v1.0.1
github.com/coreos/go-systemd/v22 v22.1.0
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f
github.com/gofrs/flock v0.7.3
github.com/gogo/googleapis v1.3.2
github.com/gogo/protobuf v1.3.1
// protobuf: the actual version is replaced in replace()
github.com/golang/protobuf v1.4.2
github.com/google/go-cmp v0.4.1
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/gorilla/mux v1.8.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
github.com/mitchellh/hashstructure v1.0.0
github.com/moby/locker v1.0.1
github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc92
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
github.com/opencontainers/selinux v1.8.0
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.5.1
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/urfave/cli v1.22.2
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
// genproto: the actual version is replaced in replace()
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
google.golang.org/grpc v1.29.1
)
replace (
// protobuf: corresponds to containerd
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
// genproto: corresponds to containerd
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
)
| module github.com/moby/buildkit
go 1.13
require (
github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.10
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
github.com/containerd/console v1.0.1
// containerd: the actual version is replaced in replace()
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
github.com/containerd/go-cni v1.0.1
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
github.com/containerd/typeurl v1.0.1
github.com/coreos/go-systemd/v22 v22.1.0
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f
github.com/gofrs/flock v0.7.3
github.com/gogo/googleapis v1.3.2
github.com/gogo/protobuf v1.3.1
// protobuf: the actual version is replaced in replace()
github.com/golang/protobuf v1.4.2
github.com/google/go-cmp v0.4.1
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/gorilla/mux v1.8.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
github.com/mitchellh/hashstructure v1.0.0
github.com/moby/locker v1.0.1
github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc92
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
github.com/opencontainers/selinux v1.8.0
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.5.1
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/urfave/cli v1.22.2
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
// genproto: the actual version is replaced in replace()
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
google.golang.org/grpc v1.29.1
)
replace (
// containerd: Forked from 0edc412565dcc6e3d6125ff9e4b009ad4b89c638 (20201117) with:
// - `Adjust overlay tests to expect "index=off"` (#4719, for ease of cherry-picking #5076)
// - `overlay: support "userxattr" option (kernel 5.11)` (#5076)
// - `docker: avoid concurrent map access panic` (#4855)
github.com/containerd/containerd => github.com/AkihiroSuda/containerd v1.1.1-0.20210312044057-48f85a131bb8
// protobuf: corresponds to containerd
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
// genproto: corresponds to containerd
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
)
| thaJeztah | a8a769f04f7ae44117772943ef0f4607b54e05ff | 7c6a9484ee37b28fa314f716581bdb268cd78ce2 | Is this related to https://github.com/moby/moby/issues/42422 ? | thaJeztah | 4,758 |
moby/moby | 42,424 | vendor: github.com/moby/buildkit v0.8.3-3-g244e8cde | ### vendor: github.com/moby/buildkit v0.8.3-3-g244e8cde
full diff: https://github.com/moby/buildkit/compare/v0.8.3...v0.8.3-3-g244e8cde
- Transform relative mountpoints for exec mounts in the executor
- Add test for handling relative mountpoints
### vendor: github.com/moby/buildkit v0.8.3
full diff: https://github.com/moby/buildkit/compare/v0.8.2...v0.8.3
- vendor containerd (required for rootless overlayfs on kernel 5.11)
  - not included to avoid depending on a fork
- Add retry on image push 5xx errors
- contenthash: include basename in content checksum for wildcards
- Fix missing mounts in execOp cache map
- Add regression test for run cache not considering mounts
- Add hack to preserve Dockerfile RUN cache compatibility after mount cache bugfix
**- Description for the changelog**
**- A picture of a cute animal (not mandatory but encouraged)**
| null | 2021-05-26 11:34:16+00:00 | 2021-06-01 17:49:42+00:00 | vendor/github.com/moby/buildkit/go.mod | module github.com/moby/buildkit
go 1.13
require (
github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.10
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
github.com/containerd/console v1.0.1
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
github.com/containerd/go-cni v1.0.1
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
github.com/containerd/typeurl v1.0.1
github.com/coreos/go-systemd/v22 v22.1.0
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f
github.com/gofrs/flock v0.7.3
github.com/gogo/googleapis v1.3.2
github.com/gogo/protobuf v1.3.1
// protobuf: the actual version is replaced in replace()
github.com/golang/protobuf v1.4.2
github.com/google/go-cmp v0.4.1
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/gorilla/mux v1.8.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
github.com/mitchellh/hashstructure v1.0.0
github.com/moby/locker v1.0.1
github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc92
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
github.com/opencontainers/selinux v1.8.0
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.5.1
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/urfave/cli v1.22.2
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
// genproto: the actual version is replaced in replace()
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
google.golang.org/grpc v1.29.1
)
replace (
// protobuf: corresponds to containerd
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
// genproto: corresponds to containerd
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
)
| module github.com/moby/buildkit
go 1.13
require (
github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.15
github.com/Microsoft/hcsshim v0.8.10
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
github.com/containerd/console v1.0.1
// containerd: the actual version is replaced in replace()
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
github.com/containerd/go-cni v1.0.1
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116
github.com/containerd/typeurl v1.0.1
github.com/coreos/go-systemd/v22 v22.1.0
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f
github.com/gofrs/flock v0.7.3
github.com/gogo/googleapis v1.3.2
github.com/gogo/protobuf v1.3.1
// protobuf: the actual version is replaced in replace()
github.com/golang/protobuf v1.4.2
github.com/google/go-cmp v0.4.1
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/gorilla/mux v1.8.0 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
github.com/hashicorp/go-immutable-radix v1.0.0
github.com/hashicorp/golang-lru v0.5.3
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
github.com/mitchellh/hashstructure v1.0.0
github.com/moby/locker v1.0.1
github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically
github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically
github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc v1.0.0-rc92
github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6
github.com/opencontainers/selinux v1.8.0
github.com/opentracing-contrib/go-stdlib v1.0.0
github.com/opentracing/opentracing-go v1.2.0
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.5.0
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
github.com/sirupsen/logrus v1.7.0
github.com/stretchr/testify v1.5.1
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
github.com/uber/jaeger-client-go v2.25.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
github.com/urfave/cli v1.22.2
go.etcd.io/bbolt v1.3.5
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9
golang.org/x/net v0.0.0-20200707034311-ab3426394381
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1
// genproto: the actual version is replaced in replace()
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece
google.golang.org/grpc v1.29.1
)
replace (
// containerd: Forked from 0edc412565dcc6e3d6125ff9e4b009ad4b89c638 (20201117) with:
// - `Adjust overlay tests to expect "index=off"` (#4719, for ease of cherry-picking #5076)
// - `overlay: support "userxattr" option (kernel 5.11)` (#5076)
// - `docker: avoid concurrent map access panic` (#4855)
github.com/containerd/containerd => github.com/AkihiroSuda/containerd v1.1.1-0.20210312044057-48f85a131bb8
// protobuf: corresponds to containerd
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
// genproto: corresponds to containerd
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
)
| thaJeztah | a8a769f04f7ae44117772943ef0f4607b54e05ff | 7c6a9484ee37b28fa314f716581bdb268cd78ce2 | no | tonistiigi | 4,759 |
moby/moby | 42,410 | contrib/check-config.sh: Make script POSIX |
**- What I did**
I needed to run `check-config.sh` on a small system that doesn't have `bash`. So I made the changes needed to turn it into a POSIX shell script and thus remove the bash dependency. Having done that for my own needs, I thought I'd share this more portable version upstream.
**- How I did it**
Replaced bashisms with their POSIX equivalents; a few representative substitutions are sketched below.
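The substitutions are the standard ones; this is only an illustrative sketch, not necessarily the exact lines that end up in the rewritten script:

```sh
#!/bin/sh
# Illustrative bash -> POSIX rewrites of the kind this change needs.

# bash: if ! command -v zgrep &> /dev/null; then ...
# POSIX: '&>' is not portable, redirect stdout and stderr separately:
if ! command -v zgrep > /dev/null 2>&1; then
	zgrep() {
		zcat "$2" | grep "$1"
	}
fi

# bash arrays (possibleConfigs=(...) / "${possibleConfigs[@]}") have no POSIX
# equivalent; a whitespace-separated list and plain word splitting work instead:
possibleConfigs="/proc/config.gz /boot/config-$(uname -r) /usr/src/linux/.config"
for tryConfig in $possibleConfigs; do
	if [ -e "$tryConfig" ]; then
		CONFIG="$tryConfig"
		break
	fi
done

# bash: echo -n / echo -en rely on non-standard flags; printf is portable:
printf '%s' "- "
echo "${CONFIG:-no config found}"
```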
**- How to verify it**
Explicitly run `check-config.sh` with a POSIX shell such as `dash`, for example as shown below.
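For instance, from the repository root (assuming `dash` is installed):

```sh
dash contrib/check-config.sh
```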
**- Description for the changelog**
`check-config.sh` is now a POSIX shell script (and not depending on bash).
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 10:22:00+00:00 | 2021-05-27 20:33:39+00:00 | contrib/check-config.sh | #!/usr/bin/env bash
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:="${possibleConfigs[0]}"}"
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=("${codes[@]}" '1')
shift
fi
if [ "$#" -gt 0 ]; then
local code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=("${codes[@]}" "$code")
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
echo -n "- "
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
source /etc/os-release 2> /dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
fi
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
echo "$(wrap_good 'cgroup hierarchy' 'cgroupv2')"
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
echo -n ' '
if command -v apt-get &> /dev/null; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum &> /dev/null; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
VETH BRIDGE BRIDGE_NETFILTER
IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
NETFILTER_XT_MARK
IP_NF_NAT NF_NAT
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
echo -n "- "
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
IP_NF_TARGET_REDIRECT
IP_VS
IP_VS_NFCT
IP_VS_PROTO_TCP
IP_VS_PROTO_UDP
IP_VS_RR
SECURITY_SELINUX
SECURITY_APPARMOR
)
check_flags "${flags[@]}"
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
echo -n " - "
check_device /dev/zfs
echo -n " - "
check_command zfs
echo -n " - "
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | faa2d887f226cc96f08d3f78340bdb69fd9c3ed2 | cb9f425df9bf8da6cd3556d45a32aac050e38512 | ```suggestion
codes="${codes:+$codes;}$code"
``` | tianon | 4,760 |
moby/moby | 42,410 | contrib/check-config.sh: Make script POSIX | **- What I did**
I needed to run `check-config.sh` on a small system that doesn't have `bash`. So I made the changes needed to turn it into a POSIX shell script and thus remove the bash dependency. Having done that for my own needs, I thought I'd share this more portable version upstream.
**- How I did it**
Replaced bashisms with their POSIX equivalents.
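For illustration, two of the conversions as they appear in the script diff below (the original bash forms are shown in comments; this is just a sketch, not an exhaustive list):

```sh
# bash: possibleConfigs=('/proc/config.gz' "/boot/config-$(uname -r)" ...)
# POSIX sh has no arrays, so the list becomes a whitespace-separated string:
possibleConfigs="
	/proc/config.gz
	/boot/config-$(uname -r)
"

# bash: echo -n '- '; command -v apparmor_parser &> /dev/null
# POSIX sh spells these as:
printf -- '- '
command -v apparmor_parser > /dev/null 2>&1 || echo 'apparmor_parser missing'
```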
**- How to verify it**
Explicitly run `check-config.sh` with a POSIX shell such as `dash`.
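For example (the kernel config path is optional; the script also accepts it as its first argument):

```sh
dash contrib/check-config.sh
dash contrib/check-config.sh "/boot/config-$(uname -r)"
```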
**- Description for the changelog**
`check-config.sh` is now a POSIX shell script (and no longer depends on bash).
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 10:22:00+00:00 | 2021-05-27 20:33:39+00:00 | contrib/check-config.sh | #!/usr/bin/env bash
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:="${possibleConfigs[0]}"}"
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=("${codes[@]}" '1')
shift
fi
if [ "$#" -gt 0 ]; then
local code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=("${codes[@]}" "$code")
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
echo -n "- "
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
source /etc/os-release 2> /dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
fi
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
echo "$(wrap_good 'cgroup hierarchy' 'cgroupv2')"
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
echo -n ' '
if command -v apt-get &> /dev/null; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum &> /dev/null; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
VETH BRIDGE BRIDGE_NETFILTER
IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
NETFILTER_XT_MARK
IP_NF_NAT NF_NAT
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
echo -n "- "
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
IP_NF_TARGET_REDIRECT
IP_VS
IP_VS_NFCT
IP_VS_PROTO_TCP
IP_VS_PROTO_UDP
IP_VS_RR
SECURITY_SELINUX
SECURITY_APPARMOR
)
check_flags "${flags[@]}"
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
echo -n " - "
check_device /dev/zfs
echo -n " - "
check_command zfs
echo -n " - "
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | faa2d887f226cc96f08d3f78340bdb69fd9c3ed2 | cb9f425df9bf8da6cd3556d45a32aac050e38512 | Adjusting the instance above that adds a second code to use `;` instead of ` ` makes this not need `IFS` at all:
```suggestion
printf '\033[%sm' "$codes"
``` | tianon | 4,761 |
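To illustrate the point of this review thread: with the `;` appended at build time, `$codes` is already a ready-to-use ANSI parameter list, so the bash trick of joining an array via `local IFS=';'` is no longer needed. A minimal sketch (not part of the original review):

```sh
codes=
for code in 1 31; do # e.g. bold + red
	codes="${codes:+$codes;}$code" # appends ';' only when $codes is non-empty
done
printf '\033[%sm' "$codes" # emits ESC[1;31m
printf 'example\033[0m\n'  # print something, then reset attributes
```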
moby/moby | 42,410 | contrib/check-config.sh: Make script POSIX | **- What I did**
I needed to run `check-config.sh` on a small system that doesn't have `bash`. So I made the changes needed to turn it into a POSIX shell script and thus remove the bash dependency. Having done that for my own needs, I thought I'd share this more portable version upstream.
**- How I did it**
Replaced bashisms with their POSIX equivalents.
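Another pair of conversions from the same script, sketched here for illustration (bash originals in comments):

```sh
# bash: source /etc/os-release; if [[ "${ID}" =~ ^(centos|rhel)$ ]]; then ...
# POSIX sh uses `.` instead of `source` and a case statement instead of =~:
. /etc/os-release 2> /dev/null || true
case "$ID" in
	centos | rhel) echo "RHEL-family distro detected: $ID" ;;
esac
```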
**- How to verify it**
Explicitly run `check-config.sh` with a POSIX shell such as `dash`.
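Any other POSIX shell should work as well; for instance (assuming `dash` and BusyBox are installed on the test machine, which is an assumption, not a requirement of the change):

```sh
dash -n contrib/check-config.sh    # parse only, no execution
busybox sh contrib/check-config.sh # full run under BusyBox ash
```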
**- Description for the changelog**
`check-config.sh` is now a POSIX shell script (and no longer depends on bash).
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 10:22:00+00:00 | 2021-05-27 20:33:39+00:00 | contrib/check-config.sh | #!/usr/bin/env bash
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:="${possibleConfigs[0]}"}"
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=("${codes[@]}" '1')
shift
fi
if [ "$#" -gt 0 ]; then
local code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=("${codes[@]}" "$code")
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
echo -n "- "
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
source /etc/os-release 2> /dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
fi
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
echo "$(wrap_good 'cgroup hierarchy' 'cgroupv2')"
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
echo -n ' '
if command -v apt-get &> /dev/null; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum &> /dev/null; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
VETH BRIDGE BRIDGE_NETFILTER
IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
NETFILTER_XT_MARK
IP_NF_NAT NF_NAT
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
echo -n "- "
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
IP_NF_TARGET_REDIRECT
IP_VS
IP_VS_NFCT
IP_VS_PROTO_TCP
IP_VS_PROTO_UDP
IP_VS_RR
SECURITY_SELINUX
SECURITY_APPARMOR
)
check_flags "${flags[@]}"
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
echo -n " - "
check_device /dev/zfs
echo -n " - "
check_command zfs
echo -n " - "
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | faa2d887f226cc96f08d3f78340bdb69fd9c3ed2 | cb9f425df9bf8da6cd3556d45a32aac050e38512 | Uh, this isn't quite right :sweat_smile: (checking `NAMESPACES` four times :muscle:)
```suggestion
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
``` | tianon | 4,762 |
moby/moby | 42,410 | contrib/check-config.sh: Make script POSIX |
**- What I did**
I needed to run `check-config.sh` on a small system that doesn't have `bash`, so I made the changes needed to turn it into a POSIX shell script and drop the bash dependency. Having done that for my own needs, I thought I'd share this more portable version upstream.
**- How I did it**
Replaced bashisms with POSIX
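
For a concrete feel of the kind of rewrites involved, here is a purely illustrative sketch (not an excerpt from the actual diff; the flag names and the `ID=centos` value are stand-ins), assuming nothing beyond a POSIX `/bin/sh`:

```sh
#!/usr/bin/env sh
# Illustrative sketch of typical bash-to-POSIX conversions (not from the real diff).

# bash arrays have no POSIX equivalent, so a whitespace-separated string
# plus field splitting takes their place:
flags='NAMESPACES NET_NS PID_NS' # a bash version might use: flags=(NAMESPACES NET_NS PID_NS)
for flag in $flags; do
	printf -- '- %s\n' "$flag" # printf replaces the non-portable "echo -n"/"echo -en"
done

# bash regex tests like [[ "$ID" =~ ^(centos|rhel)$ ]] become case patterns:
ID=centos # stand-in value for the sketch
case "$ID" in
	centos | rhel) echo 'RHEL-family distro' ;;
esac
```

The before/after versions of the script captured further down in this record show the full set of changes.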
**- How to verify it**
Explicitly run `check-config.sh` with a POSIX shell such as `dash`.
**- Description for the changelog**
`check-config.sh` is now a POSIX shell script (and not depending on bash).
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 10:22:00+00:00 | 2021-05-27 20:33:39+00:00 | contrib/check-config.sh | #!/usr/bin/env bash
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:="${possibleConfigs[0]}"}"
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=("${codes[@]}" '1')
shift
fi
if [ "$#" -gt 0 ]; then
local code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=("${codes[@]}" "$code")
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
echo -n "- "
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
source /etc/os-release 2> /dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
fi
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
echo "$(wrap_good 'cgroup hierarchy' 'cgroupv2')"
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
echo -n ' '
if command -v apt-get &> /dev/null; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum &> /dev/null; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
VETH BRIDGE BRIDGE_NETFILTER
IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
NETFILTER_XT_MARK
IP_NF_NAT NF_NAT
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
echo -n "- "
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
echo -n "- "
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
IP_NF_TARGET_REDIRECT
IP_VS
IP_VS_NFCT
IP_VS_PROTO_TCP
IP_VS_PROTO_UDP
IP_VS_RR
SECURITY_SELINUX
SECURITY_APPARMOR
)
check_flags "${flags[@]}"
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
echo -n " - "
check_device /dev/zfs
echo -n " - "
check_command zfs
echo -n " - "
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | faa2d887f226cc96f08d3f78340bdb69fd9c3ed2 | cb9f425df9bf8da6cd3556d45a32aac050e38512 | Oh my, what a mistake on my side! A typical example of the human brain tricking us into seeing what we expect to see; underscores are not spaces, and vice versa. :-) Thanks for spotting! | joakimr-axis | 4,763
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency |
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
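
The awk-free mount-point lookup that ends up in the updated script (reproduced later in this record) is a single `sed` expression. A commented sketch of what it does, run against a made-up `/proc/mounts` line:

```sh
#!/usr/bin/env sh
# Sketch of the awk-free cgroup mount-point lookup; the sample line is invented for this demo.
line='cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0'
printf '%s\n' "$line" \
	| sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }'
# ^[^ ]+ ([^ ]+) cgroup  -> device, then the mount point (captured), then the "cgroup" fstype
# ([^ ]*,)?(cpu|...)[, ] -> one of the v1 controllers must appear among the mount options
# s//\1/p; q             -> print the captured mount point for the first match, then stop
# (prints: /sys/fs/cgroup/memory; "-r" is the GNU/busybox flag for extended regexes)
```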
**- How to verify it**
Run on a host without awk
**- Description for the changelog**
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | This changes the actual implementation in a pretty subtle way, and IMO pretty drastically decreases the readability/maintainability of it.
I'm really curious what environment you're in that has a `/bin/sh` but doesn't have an `awk` implementation -- can you elaborate? Even BusyBox has a pretty good `awk` implementation. :sweat_smile: | tianon | 4,764 |
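
One rough way to probe the concern about a subtle implementation change is to feed the same sample `/proc/mounts` line to the old `awk` extraction and the new `sed` one and compare the output. The harness below is only a sketch; the sample line and the comparison logic are assumptions of mine, not part of the PR:

```sh
#!/usr/bin/env sh
# Rough sanity check: run the old awk extraction and the new sed extraction
# against the same sample /proc/mounts line and compare the results.
sample='cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0'
old="$(printf '%s\n' "$sample" | awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' | head -n1)"
new="$(printf '%s\n' "$sample" | sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }')"
printf 'awk: %s\nsed: %s\n' "$old" "$new"
if [ "$old" = "$new" ]; then
	echo 'extractions agree'
else
	echo 'extractions differ'
fi
```

Real coverage would need lines that exercise the edge cases (controller names embedded in mount paths, multiple cgroup mounts, and so on).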
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency |
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on a host without awk
**- Description for the changelog**
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | I reckon the readability matter depends on how much of an awk power user one is. :-)
I tested back and forth and thought I had understood the current awk command as: find the entries in /proc/mounts, match the ones with the type cgroup, then send them to head to use just the first of them. So, as you write, I made a subtle change in that I search the entries and stop searching once I have found the first match that has the cgroup type. But I reckon the outcome should stay correct? (Can there be such entries where the type is not cgroup, by the way?)
As you guessed, I didn't make this change just for kicks: on the edge device ([Axis Q1615-LE Mk III](https://www.axis.com/products/axis-q1615-le-mk-iii)) that I use, there was no awk to be found. I could certainly build one and provide it just to run this script, but thought it easier to avoid the awk dependency. | joakimr-axis | 4,765
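To make the comparison concrete, here is a minimal sketch of what the two variants extract; the sample /proc/mounts line is made up for illustration and is not taken from the PR:

```shell
# Made-up example line; real /proc/mounts content differs per system.
line='cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,relatime,cpu,cpuacct 0 0'

# Current awk variant: field 3 (the fs type) must be "cgroup" and one of the
# controller names must appear between commas/spaces; print the mount point
# (field 2) and keep only the first match.
echo "$line" \
	| awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' \
	| head -n1

# Proposed sed variant: the same selection without awk; capture the mount point
# when the fs type is "cgroup" and a controller is listed, print it, and quit
# after the first matching line.
echo "$line" \
	| sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }'
```

Both commands print `/sys/fs/cgroup/cpu,cpuacct` for this sample line, so the `dirname`-based checks that follow in the script keep working the same way.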
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency |
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh |
#!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | As for the readability part, perhaps replacing the efficient
```shell
[ "$type" = cgroup ] && {
...
}
```
expression with an
```shell
if [ "$type" = cgroup ]; then
...
fi
```
block instead would make it more approachable for maintainers? | joakimr-axis | 4,766 |
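For what it's worth, here is how the two styles look when filled in with a small body; the `while read` loop over /proc/mounts and the variable names are only an assumed stand-in, not the exact code under review:

```shell
# Assumed stand-in loop, just to compare the two styles side by side.
while read -r _dev dir type _rest; do
	# compact list form
	[ "$type" = cgroup ] && {
		echo "first cgroup mount: $dir"
		break
	}
done < /proc/mounts

while read -r _dev dir type _rest; do
	# spelled-out form, arguably easier to scan
	if [ "$type" = cgroup ]; then
		echo "first cgroup mount: $dir"
		break
	fi
done < /proc/mounts
```

Functionally the two are equivalent here; the difference is purely one of readability.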
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency |
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh |
#!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.4-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | I did an update with that now; please see what you think about it. | joakimr-axis | 4,767 |
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
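For context, the change at the heart of this PR is a single line; both versions below are copied verbatim from the before/after content further down in this record (note that the merged version ended up using `sed` rather than `grep` for this line, while the comments are added here for illustration):

```sh
# before: depends on external awk
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
# after (as merged): same intent, implemented with sed — print the mount point of the first cgroup v1 controller mount
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
```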
**- How to verify it**
Run on host without awk
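A minimal sketch of one way to do that check (the stub path and approach are illustrative assumptions, not from the PR):

```sh
# put a failing "awk" stub first in PATH so any awk invocation fails loudly,
# then run the script; a clean run suggests awk is no longer needed
mkdir -p /tmp/noawk
printf '#!/bin/sh\necho "awk should not be needed" >&2\nexit 127\n' > /tmp/noawk/awk
chmod +x /tmp/noawk/awk
PATH="/tmp/noawk:$PATH" sh contrib/check-config.sh
```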
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | The biggest change here is that this new `grep` line is looking for `cpu` anywhere in any of the mount lines, which is subtly different from looking for a mount that has `cpu` exactly as a mount flag (and much more likely to have false positives).
Here's what I've got in mind instead:
```suggestion
cgroupSubsystemDir="$(grep -Em1 '^[^ ]+ [^ ]+ cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ]' /proc/mounts | cut -d' ' -f2)"
``` | tianon | 4,768 |
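(Editorial aside, not part of the original review: the suggested pattern can be exercised against made-up `/proc/mounts` lines to see what it accepts and rejects; both sample lines below are invented for illustration.)

```sh
# line 1: "cpu" appears only in the mount path and the fs type is not cgroup -> rejected
# line 2: an actual cgroup v1 cpu,cpuacct mount -> accepted, its mount point is printed
printf '%s\n' \
	'tmpfs /mnt/cpu-data tmpfs rw,nosuid 0 0' \
	'cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,cpu,cpuacct 0 0' \
	| grep -Em1 '^[^ ]+ [^ ]+ cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ]' \
	| cut -d' ' -f2
# prints: /sys/fs/cgroup/cpu,cpuacct
```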
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | Yes, I like that! IMO it improves readability over the `awk` command, in addition to getting rid of `awk`.
Even though I like `cut` (and have it available on my limited device), I reckon that the external `cut` dependency could be replaced with shell-internal functionality. | joakimr-axis | 4,769 |
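A minimal sketch of what that could look like (hypothetical variable names; POSIX `read` doing the field splitting instead of `cut`):

```sh
# made-up /proc/mounts line for illustration
mountLine='cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,cpu,cpuacct 0 0'
# read splits on IFS, so the second whitespace-separated field lands in $dir
IFS=' ' read -r _ dir _ <<EOF
$mountLine
EOF
echo "$dir" # -> /sys/fs/cgroup/cpu,cpuacct
```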
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | If we're going to go to this level, IMO we might as well switch to `sed` and do it in a single command:
```suggestion
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
```
(Having a shell looping construct in here at all feels like overkill.) | tianon | 4,770 |
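A minimal sketch of the point being discussed above, run against a made-up `/proc/mounts` line (the sample line is an assumption, not taken from a real host): the original `awk | head` pipeline and the suggested single `sed` invocation print the same subsystem mount point.

```sh
# Hypothetical sample line; the real script reads /proc/mounts directly.
sample='cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0'

# awk keeps lines whose third field is "cgroup" and whose options name a known
# subsystem, prints the mount point ($2), and head keeps the first hit.
printf '%s\n' "$sample" \
	| awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' \
	| head -n1
# -> /sys/fs/cgroup/cpu,cpuacct

# The suggested sed command prints the mount point of the first matching line
# (the empty s// reuses the address regex) and quits immediately.
printf '%s\n' "$sample" \
	| sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }'
# -> /sys/fs/cgroup/cpu,cpuacct
```

Either way, check-config.sh then takes `dirname` of the result and probes that parent directory for the individual controllers.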
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | So the loop is to give the successful exit code even for empty inputs, IMO not overkill but just standard shell scripting.
But I do like your `sed` version the best, that one is indeed the best, hands down. Outstanding winner.
With that I guess it's a bit wrong that the PR is tied to me now that you have the best solution. Can we find a good way to point the cred of the PR to you in the git history? | joakimr-axis | 4,771 |
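The exit-code point made here can be illustrated with a small self-contained sketch (the loop-based revision itself is not shown in this row, so this only demonstrates the general behaviour under `set -e`): `sed -n` exits 0 even when nothing matches, while a bare `grep` exits 1 on no match and would abort the script.

```sh
#!/usr/bin/env sh
set -e
# Hypothetical input with no cgroup lines at all (e.g. a cgroup v2-only host).
printf 'tmpfs /run tmpfs rw 0 0\n' > mounts.sample

# sed prints nothing here but still exits 0, so the assignment succeeds:
dir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' mounts.sample)"
echo "still running, dir='${dir}'"

# A bare grep would exit 1 on no match and, with set -e, stop the script here:
# dir="$(grep ' cgroup ' mounts.sample)"
```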
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on host without awk
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | ...perhaps you could create a new PR with this final `sed` fix and then I close this PR and refer to that new PR that is yours? | joakimr-axis | 4,772 |
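For reference, the `sed` fix mentioned in that comment is the one visible in the before/after file contents recorded in the surrounding rows for this PR: the script's single `awk` invocation, used to locate a mounted cgroup v1 controller, is replaced by an equivalent `sed` expression, removing the awk dependency entirely. Copied verbatim from those file contents:
    # before: needs awk
    cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
    # after: sed only; print the mount point of the first matching cgroup mount, then quit
    cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
The `q` command in the sed program takes over the job of `| head -n1`, so the pipeline disappears together with the awk dependency.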
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on a host without awk (see the sketch just below this description)
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | I don't need any credit, but if you want to make sure I get some you can add `Co-authored-by: Tianon Gravi <[email protected]>` to your commit message (especially since you're going to want/need to squash this all back down to one commit :sweat_smile:) -- your latest commit already has the appropriate `Co-authored-by` so you can see that GitHub's given us both credit on it. :smile:
All your commits have a valid DCO and you've enabled repository maintainers editing your branch, so I'd be happy to perform that squash for you if you'd like me to. | tianon | 4,773 |
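For readers unfamiliar with the workflow discussed in that comment: keeping both the DCO `Signed-off-by:` line and a `Co-authored-by:` trailer while squashing a branch down to one commit is a local git operation. A minimal sketch, with the commit count, branch name and remote as placeholders rather than values taken from this PR:
    git rebase -i HEAD~N               # mark all but the first commit as "squash"
    git commit --amend -s              # re-sign the squashed commit (adds Signed-off-by:)
    # end the commit message with the co-author trailer, for example:
    #   Co-authored-by: Tianon Gravi <[email protected]>
    git push --force-with-lease origin my-branch
GitHub credits every address listed in a `Co-authored-by:` trailer as a co-author of the commit, which is the attribution the reviewer is describing.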
moby/moby | 42,409 | contrib/check-config.sh: Remove awk dependency | <!--
Please make sure you've read and understood our contributing guidelines;
https://github.com/moby/moby/blob/master/CONTRIBUTING.md
** Make sure all your commits include a signature generated with `git commit -s` **
For additional information on our contributing process, read our contributing
guide https://docs.docker.com/opensource/code/
If this is a bug fix, make sure your description includes "fixes #xxxx", or
"closes #xxxx"
Please provide the following information:
-->
**- What I did**
Remove awk dependency in `contrib/check-config.sh`
**- How I did it**
Use grep and built-in functionality
**- How to verify it**
Run on a host without awk
**- Description for the changelog**
<!--
Write a short (one line) summary that describes the changes in this
pull request for inclusion in the changelog:
-->
Remove awk dependency in contrib/check-config.sh
**- A picture of a cute animal (not mandatory but encouraged)**

| null | 2021-05-24 09:00:04+00:00 | 2021-06-03 22:55:21+00:00 | contrib/check-config.sh | #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| #!/usr/bin/env sh
set -e
EXITCODE=0
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs="
/proc/config.gz
/boot/config-$(uname -r)
/usr/src/linux-$(uname -r)/.config
/usr/src/linux/.config
"
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: "${CONFIG:=/proc/config.gz}"
fi
if ! command -v zgrep > /dev/null 2>&1; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
codes=
if [ "$1" = 'bold' ]; then
codes='1'
shift
fi
if [ "$#" -gt 0 ]; then
code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes="${codes:+$codes;}$code"
fi
fi
printf '\033[%sm' "$codes"
}
wrap_color() {
text="$1"
shift
color "$@"
printf '%s' "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
EXITCODE=1
fi
}
check_flags() {
for flag in "$@"; do
printf -- '- '
check_flag "$flag"
done
}
check_command() {
if command -v "$1" > /dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
EXITCODE=1
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
EXITCODE=1
fi
}
check_distro_userns() {
. /etc/os-release 2> /dev/null || /bin/true
case "$ID" in
centos | rhel)
case "$VERSION_ID" in
7*)
# this is a CentOS7 or RHEL7 system
grep -q 'user_namespace.enable=1' /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
EXITCODE=1
}
;;
esac
;;
esac
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in $possibleConfigs; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
printf -- '- '
if [ "$(stat -f -c %t /sys/fs/cgroup 2> /dev/null)" = '63677270' ]; then
wrap_good 'cgroup hierarchy' 'cgroupv2'
else
cgroupSubsystemDir="$(sed -rne '/^[^ ]+ ([^ ]+) cgroup ([^ ]*,)?(cpu|cpuacct|cpuset|devices|freezer|memory)[, ].*$/ { s//\1/p; q }' /proc/mounts)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" ] || [ -d "$cgroupDir/cpuacct" ] || [ -d "$cgroupDir/cpuset" ] || [ -d "$cgroupDir/devices" ] || [ -d "$cgroupDir/freezer" ] || [ -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
wrap_bad 'cgroup hierarchy' 'nonexistent??'
fi
EXITCODE=1
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2> /dev/null)" = 'Y' ]; then
printf -- '- '
if command -v apparmor_parser > /dev/null 2>&1; then
wrap_good 'apparmor' 'enabled and tools installed'
else
wrap_bad 'apparmor' 'enabled, but apparmor_parser missing'
printf ' '
if command -v apt-get > /dev/null 2>&1; then
wrap_color '(use "apt-get install apparmor" to fix this)'
elif command -v yum > /dev/null 2>&1; then
wrap_color '(your best bet is "yum install apparmor-parser")'
else
wrap_color '(look for an "apparmor" package for your distribution)'
fi
EXITCODE=1
fi
fi
check_flags \
NAMESPACES NET_NS PID_NS IPC_NS UTS_NS \
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG \
KEYS \
VETH BRIDGE BRIDGE_NETFILTER \
IP_NF_FILTER IP_NF_TARGET_MASQUERADE \
NETFILTER_XT_MATCH_ADDRTYPE \
NETFILTER_XT_MATCH_CONNTRACK \
NETFILTER_XT_MATCH_IPVS \
NETFILTER_XT_MARK \
IP_NF_NAT NF_NAT \
POSIX_MQUEUE
# (POSIX_MQUEUE is required for bind-mounting /dev/mqueue into containers)
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -lt 8 ]); then
check_flags DEVPTS_MULTIPLE_INSTANCES
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 1 ]; then
check_flags NF_NAT_IPV4
fi
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 2 ]; then
check_flags NF_NAT_NEEDED
fi
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
check_flags SECCOMP_FILTER
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP
# Kernel v5.8+ removes MEMCG_SWAP_ENABLED.
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 8 ]; then
CODE=${EXITCODE}
check_flags MEMCG_SWAP_ENABLED
# FIXME this check is cgroupv1-specific
if [ -e /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes ]; then
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
EXITCODE=${CODE}
elif is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(cgroup swap accounting is currently not enabled, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
else
# Kernel v5.8+ enables swap accounting by default.
echo " $(wrap_color '(cgroup swap accounting is currently enabled)' bold black)"
fi
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
printf -- '- '
wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
printf -- '- '
wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"
echo " $(wrap_color ' VDSO which may assist in exploiting security vulnerabilities.)' bold black)"
# else Older kernels (prior to 3dc33bd30f3e, released in v4.40-rc1) do
# not have these LEGACY_VSYSCALL options and are effectively
# LEGACY_VSYSCALL_EMULATE. Even older kernels are presumably
# effectively LEGACY_VSYSCALL_NATIVE.
fi
}
if [ "$kernelMajor" -lt 4 ] || ([ "$kernelMajor" -eq 4 ] && [ "$kernelMinor" -le 5 ]); then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 18 ]); then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || ([ "$kernelMajor" -eq 3 ] && [ "$kernelMinor" -le 13 ]); then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
if [ "$kernelMajor" -lt 5 ]; then
check_flags IOSCHED_CFQ CFQ_GROUP_IOSCHED
fi
check_flags \
BLK_CGROUP BLK_DEV_THROTTLING \
CGROUP_PERF \
CGROUP_HUGETLB \
NET_CLS_CGROUP $netprio \
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED \
IP_NF_TARGET_REDIRECT \
IP_VS \
IP_VS_NFCT \
IP_VS_PROTO_TCP \
IP_VS_PROTO_UDP \
IP_VS_RR \
SECURITY_SELINUX \
SECURITY_APPARMOR
if ! is_set EXT4_USE_FOR_EXT2; then
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
if is_set EXT4_USE_FOR_EXT2; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext3 or ext4 as backing filesystem' bold black)"
else
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
fi
echo '- Network Drivers:'
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags VXLAN BRIDGE_VLAN_FILTERING | sed 's/^/ /'
echo ' Optional (for encrypted networks):'
check_flags CRYPTO CRYPTO_AEAD CRYPTO_GCM CRYPTO_SEQIV CRYPTO_GHASH \
XFRM XFRM_USER XFRM_ALGO INET_ESP | sed 's/^/ /'
if [ "$kernelMajor" -lt 5 ] || [ "$kernelMajor" -eq 5 -a "$kernelMinor" -le 3 ]; then
check_flags INET_XFRM_MODE_TRANSPORT | sed 's/^/ /'
fi
echo " - \"$(wrap_color 'ipvlan' blue)\":"
check_flags IPVLAN | sed 's/^/ /'
echo " - \"$(wrap_color 'macvlan' blue)\":"
check_flags MACVLAN DUMMY | sed 's/^/ /'
echo " - \"$(wrap_color 'ftp,tftp client in container' blue)\":"
check_flags NF_NAT_FTP NF_CONNTRACK_FTP NF_NAT_TFTP NF_CONNTRACK_TFTP | sed 's/^/ /'
# only fail if no storage drivers available
CODE=${EXITCODE}
EXITCODE=0
STORAGE=1
echo '- Storage Drivers:'
echo " - \"$(wrap_color 'aufs' blue)\":"
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'btrfs' blue)\":"
check_flags BTRFS_FS | sed 's/^/ /'
check_flags BTRFS_FS_POSIX_ACL | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'devicemapper' blue)\":"
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'overlay' blue)\":"
check_flags OVERLAY_FS | sed 's/^/ /'
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
echo " - \"$(wrap_color 'zfs' blue)\":"
printf ' - '
check_device /dev/zfs
printf ' - '
check_command zfs
printf ' - '
check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0
EXITCODE=$CODE
[ "$STORAGE" = 1 ] && EXITCODE=1
echo
check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
EXITCODE=1
else
wrap_good "- $1" "$(cat "$1")"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
exit $EXITCODE
| joakimr-axis | a77317882d010b884a9101c6ad0b2d7db141082f | a2343c8c4f7c18c4209a83841e69a8c28857290a | > All your commits have a valid DCO and you've enabled repository maintainers editing your branch, so I'd be happy to perform that squash for you if you'd like me to.
No worries, git squash and push done now, and the commit message updated accordingly.
| joakimr-axis | 4,774 |
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
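As described, the new flag turns dockerd into a configuration checker that stops before the daemon actually starts. A rough usage sketch (the config path is the usual default and the behavior notes are illustrative; neither is quoted from this PR):
    # load flags plus the config file, validate the merged configuration, then exit
    dockerd --validate --config-file=/etc/docker/daemon.json
    # exit status 0 when the configuration is valid, non-zero otherwise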
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | cmd/dockerd/daemon.go | package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
stopc := make(chan bool)
defer close(stopc)
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
// return human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
// Restart all autostart containers which have a swarm endpoint
// and are not yet running, now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill a container or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK basically checks for an explicitly disabled TLS/TLSVerify
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth (or an explicit opt-in to disable it)
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
// shortcut all this extra stuff for literal "localhost"
// -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
| package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
authzMiddleware *authorization.Middleware // authzMiddleware enables the authorization plugins to be reloaded dynamically
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
if opts.Validate {
// If config wasn't OK we wouldn't have made it this far.
fmt.Fprintln(os.Stderr, "configuration OK")
return nil
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
// return human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
stopc := make(chan bool)
defer close(stopc)
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
// Restart all autostart containers which have a swarm endpoint
// and are not yet running, now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill a container or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK basically checks for an explicitly disabled TLS/TLSVerify
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth (or an explicit opt-in to disable it)
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
// shortcut all this extra stuff for literal "localhost"
// -H supports specifying hostnames, since we want to bypass this on loopback interfaces we'll look it up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Thanks!
Also wondering if we should move the `stopc` code at line 78 further down;
```go
stopc := make(chan bool)
defer close(stopc)
```
It looks like it's not used until line 190, so moving it closer to that would both make it easier to read and skip the problem of that defer altogether. | thaJeztah | 4,775
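For reference, the after-content in this row reflects that suggestion: the channel is created, and its close deferred, only right before the signal handler that actually uses it. The relevant excerpt:

```go
// Excerpt from the merged start(): stopc now lives next to its only consumer,
// so the deferred close is registered only on code paths that reach the trap.
stopc := make(chan bool)
defer close(stopc)
signal.Trap(func() {
	cli.stop()
	<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
```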
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
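The excerpt in this row only shows `opts.Validate` being consumed; the flag itself is registered elsewhere in cmd/dockerd and is not part of this file. Purely as an illustrative sketch (file and names here are assumptions, not quoted from the PR), the wiring would look roughly like:

```go
// Hypothetical sketch only: how a --validate flag could be tied to daemonOptions.
// The real registration lives in cmd/dockerd (e.g. options.go), not shown in this row.
package main

import "github.com/spf13/pflag"

type daemonOptions struct {
	// ...the existing daemon options are elided here...
	Validate bool
}

func (o *daemonOptions) installFlags(flags *pflag.FlagSet) {
	flags.BoolVar(&o.Validate, "validate", false, "Validate daemon configuration and exit")
}
```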
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | cmd/dockerd/daemon.go | package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
authzMiddleware *authorization.Middleware // authzMiddleware enables the authorization plugins to be reloaded dynamically
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
stopc := make(chan bool)
defer close(stopc)
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
// return human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
// Restart all autostart containers which have a swarm endpoint
// and are not yet running, now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill a container or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
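// newAPIServerConfig constructs the API server configuration, including the TLS
// configuration when TLS is enabled.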
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK essentially checks for an explicitly disabled TLS/TLSVerify.
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth or an explicit opt-in to disable it.
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
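// loadListeners parses the configured hosts, warns when binding to TCP without TLS
// verification, and registers a listener for each host on the API server.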
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
				// shortcut all this extra stuff for literal "localhost"
				// -H supports specifying hostnames; since we want to bypass this on loopback interfaces, we'll look it up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
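// createAndStartCluster creates the SwarmKit cluster component, attaches it to the
// daemon, and starts it.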
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
| package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
	authzMiddleware *authorization.Middleware // authzMiddleware enables dynamically reloading the authorization plugins
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
if opts.Validate {
// If config wasn't OK we wouldn't have made it this far.
fmt.Fprintln(os.Stderr, "configuration OK")
return nil
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
	// return a human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
stopc := make(chan bool)
defer close(stopc)
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
	// Restart all autostart containers that have a swarm endpoint
	// and are not yet running, now that we have successfully
	// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
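// newRouterOptions creates the session manager and the build backends (classic
// builder and BuildKit) needed to initialize the API routers.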
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill containers or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
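// loadDaemonCliConfig loads the daemon configuration from the command-line
// options and the configuration file (if any), merges them, and validates the result.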
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
		// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
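// warnOnDeprecatedConfigOptions logs a warning for each deprecated cluster-store
// related option that is set in the given configuration.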
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
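// initRouter constructs the API routers and registers them on the API server.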
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
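// newAPIServerConfig constructs the API server configuration, including the TLS
// configuration when TLS is enabled.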
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK essentially checks for an explicitly disabled TLS/TLSVerify.
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth or an explicit opt-in to disable it.
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
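// loadListeners parses the configured hosts, warns when binding to TCP without TLS
// verification, and registers a listener for each host on the API server.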
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
				// shortcut all this extra stuff for literal "localhost"
				// -H supports specifying hostnames; since we want to bypass this on loopback interfaces, we'll look it up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
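// createAndStartCluster creates the SwarmKit cluster component, attaches it to the
// daemon, and starts it.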
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | ```bash
mkdir /foo
echo '{' > /foo/malformed-config.json
echo '' > /foo/empty-config-1.json
echo '{}' > /foo/empty-config-2.json
echo '{"unknown-option": true}' > /foo/invalid-config-1.json
echo '{"debug": true}' > /foo/valid-config-1.json
```
Then trying them:
```bash
for file in /foo/*.json; do sh -xc "dockerd --validate --config-file=${file}"; done
+ dockerd --validate --config-file=/foo/empty-config-1.json
unable to configure the Docker daemon with file /foo/empty-config-1.json: EOF
+ dockerd --validate --config-file=/foo/empty-config-2.json
INFO[2021-05-19T10:31:30.747037342Z] Config OK
+ dockerd --validate --config-file=/foo/invalid-config-1.json
unable to configure the Docker daemon with file /foo/invalid-config-1.json: the following directives don't match any configuration option: unknown-option
+ dockerd --validate --config-file=/foo/malformed-config.json
unable to configure the Docker daemon with file /foo/malformed-config.json: unexpected EOF
+ dockerd --validate --config-file=/foo/valid-config-1.json
INFO[2021-05-19T10:31:30.883917472Z] Config OK
```
Looking at the output:
- For a follow-up: I think we should ignore the "empty" file. It's probably ok to ignore an empty `daemon.json` (the same way we ignore an empty JSON object (`{}`)).
- Looking at it now, using `logrus.Infof()` adds a lot of noise; I think we should just print `Config OK`. That would also make the output easier to use in scripts (check whether the output is "Config OK").
I think we should also have a simple integration test for this. I see there's a `TestConfigDaemonLibtrustID` test in https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/integration/config/config_test.go#L382-L400
Some notes on that test, though:
- it looks like it's "actually" in the wrong place (that test suite is testing `docker config`, i.e. swarm configs), so it should _perhaps_ be moved to a new suite, but we can look at that later
- the "test utils" we have to start the daemon may be a bit complicated for this (they're mostly for setting up a daemon and then interacting with the running daemon, not so much for testing the daemon CLI); it would need a bit of looking into (but I'm happy to help if needed). This particular test would probably have been easier to implement in the old `integration-cli` tests, but those were marked "deprecated", so we can't add new tests there. So instead, perhaps just using [`os/exec`](https://golang.org/pkg/os/exec/#Command) to start the binary and check its stdout/stderr would work; a rough sketch follows below.
| thaJeztah | 4,776 |
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138; avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | cmd/dockerd/daemon.go | package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
	authzMiddleware *authorization.Middleware // authzMiddleware enables dynamically reloading the authorization plugins
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
stopc := make(chan bool)
defer close(stopc)
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
	// return a human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
	// Restart all autostart containers that have a swarm endpoint
	// and are not yet running, now that we have successfully
	// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
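// newRouterOptions creates the session manager and the build backends (classic
// builder and BuildKit) needed to initialize the API routers.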
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill containers or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
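// loadDaemonCliConfig loads the daemon configuration from the command-line
// options and the configuration file (if any), merges them, and validates the result.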
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
		// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
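// warnOnDeprecatedConfigOptions logs a warning for each deprecated cluster-store
// related option that is set in the given configuration.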
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
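// initRouter constructs the API routers and registers them on the API server.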
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
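// newAPIServerConfig constructs the API server configuration, including the TLS
// configuration when TLS is enabled.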
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK essentially checks for an explicitly disabled TLS/TLSVerify.
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth or an explicit opt-in to disable it.
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
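// loadListeners parses the configured hosts, warns when binding to TCP without TLS
// verification, and registers a listener for each host on the API server.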
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
				// shortcut all this extra stuff for literal "localhost"
				// -H supports specifying hostnames; since we want to bypass this on loopback interfaces, we'll look it up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
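// createAndStartCluster creates the SwarmKit cluster component, attaches it to the
// daemon, and starts it.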
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
| package main
import (
"context"
"crypto/tls"
"fmt"
"net"
"os"
"path/filepath"
"runtime"
"strings"
"time"
containerddefaults "github.com/containerd/containerd/defaults"
"github.com/docker/docker/api"
apiserver "github.com/docker/docker/api/server"
buildbackend "github.com/docker/docker/api/server/backend/build"
"github.com/docker/docker/api/server/middleware"
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/api/server/router/build"
checkpointrouter "github.com/docker/docker/api/server/router/checkpoint"
"github.com/docker/docker/api/server/router/container"
distributionrouter "github.com/docker/docker/api/server/router/distribution"
grpcrouter "github.com/docker/docker/api/server/router/grpc"
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
buildkit "github.com/docker/docker/builder/builder-next"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/listeners"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/libcontainerd/supervisor"
dopts "github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/pidfile"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/plugin"
"github.com/docker/docker/rootless"
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// DaemonCli represents the daemon CLI.
type DaemonCli struct {
*config.Config
configFile *string
flags *pflag.FlagSet
api *apiserver.Server
d *daemon.Daemon
authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
if opts.Validate {
// If config wasn't OK we wouldn't have made it this far.
fmt.Fprintln(os.Stderr, "configuration OK")
return nil
}
warnOnDeprecatedConfigOptions(cli.Config)
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
logrus.Info("Starting up")
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
}
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. This mode has feature limitations.")
}
if rootless.RunningWithRootlessKit() {
logrus.Info("Running with RootlessKit integration")
if !cli.Config.IsRootless() {
return fmt.Errorf("rootless mode needs to be enabled for running with RootlessKit")
}
}
// return human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
if cli.Config.IsRootless() {
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
}
serverConfig, err := newAPIServerConfig(cli)
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
cli.api = apiserver.New(serverConfig)
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
waitForContainerDShutdown, err := cli.initContainerD(ctx)
if waitForContainerDShutdown != nil {
defer waitForContainerDShutdown(10 * time.Second)
}
if err != nil {
cancel()
return err
}
defer cancel()
stopc := make(chan bool)
defer close(stopc)
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifyReady()
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
cli.d = d
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return errors.Wrap(err, "failed to start metrics server")
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
// Restart all autostart containers that have a swarm endpoint
// and are not yet running, now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifyReady()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
errAPI := <-serveAPIWait
c.Cleanup()
// notify systemd that we're shutting down
notifyStopping()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
logrus.Info("Daemon shutdown complete")
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
features *map[string]bool
buildkit *buildkit.Builder
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
if err != nil {
return opts, err
}
cgroupParent := newCgroupParent(config)
bk, err := buildkit.New(buildkit.Opt{
SessionManager: sm,
Root: filepath.Join(config.Root, "buildkit"),
Dist: d.DistributionServices(),
NetworkController: d.NetworkController(),
DefaultCgroupParent: cgroupParent,
RegistryHosts: d.RegistryHosts(),
BuilderConfig: config.Builder,
Rootless: d.Rootless(),
IdentityMapping: d.IdentityMapping(),
DNSConfig: config.DNSConfig,
ApparmorProfile: daemon.DefaultApparmorProfile(),
})
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildkit: bk,
features: d.Features(),
daemon: d,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(c *config.Config) {
// Revalidate and reload the authorization plugins
if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
logrus.Fatalf("Error validating authorization plugin: %v", err)
return
}
cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins)
if err := cli.d.Reload(c); err != nil {
logrus.Errorf("Error reconfiguring the daemon: %v", err)
return
}
if c.IsValueSet("debug") {
debugEnabled := debug.IsEnabled()
switch {
case debugEnabled && !c.Debug: // disable debug
debug.Disable()
case c.Debug && !debugEnabled: // enable debug
debug.Enable()
}
}
}
if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil {
logrus.Error(err)
}
}
func (cli *DaemonCli) stop() {
cli.api.Close()
}
// shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case
// d.Shutdown() is waiting too long to kill containers or, worse, is
// blocked there
func shutdownDaemon(d *daemon.Daemon) {
shutdownTimeout := d.ShutdownTimeout()
ch := make(chan struct{})
go func() {
d.Shutdown()
close(ch)
}()
if shutdownTimeout < 0 {
<-ch
logrus.Debug("Clean shutdown succeeded")
return
}
timeout := time.NewTimer(time.Duration(shutdownTimeout) * time.Second)
defer timeout.Stop()
select {
case <-ch:
logrus.Debug("Clean shutdown succeeded")
case <-timeout.C:
logrus.Error("Force shutdown daemon")
}
}
func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
conf := opts.daemonConfig
flags := opts.flags
conf.Debug = opts.Debug
conf.Hosts = opts.Hosts
conf.LogLevel = opts.LogLevel
if opts.flags.Changed(FlagTLS) {
conf.TLS = &opts.TLS
}
if opts.flags.Changed(FlagTLSVerify) {
conf.TLSVerify = &opts.TLSVerify
v := true
conf.TLS = &v
}
conf.CommonTLSOptions = config.CommonTLSOptions{}
if opts.TLSOptions != nil {
conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile
conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile
conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile
}
if conf.TrustKeyPath == "" {
daemonConfDir, err := getDaemonConfDir(conf.Root)
if err != nil {
return nil, err
}
conf.TrustKeyPath = filepath.Join(daemonConfDir, defaultTrustKeyFile)
}
if flags.Changed("graph") && flags.Changed("data-root") {
return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
}
if opts.configFile != "" {
c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return nil, errors.Wrapf(err, "unable to configure the Docker daemon with file %s", opts.configFile)
}
}
// the merged configuration can be nil if the config file didn't exist.
// leave the current configuration as it is when that happens.
if c != nil {
conf = c
}
}
if err := config.Validate(conf); err != nil {
return nil, err
}
if flags.Changed("graph") {
logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
}
// Check if duplicate label-keys with different values are found
newLabels, err := config.GetConflictFreeLabels(conf.Labels)
if err != nil {
return nil, err
}
conf.Labels = newLabels
// Regardless of whether the user sets it to true or false, if they
// specify TLSVerify at all then we need to turn on TLS
if conf.IsValueSet(FlagTLSVerify) {
v := true
conf.TLS = &v
}
if conf.TLSVerify == nil && conf.TLS != nil {
conf.TLSVerify = conf.TLS
}
return conf, nil
}
func warnOnDeprecatedConfigOptions(config *config.Config) {
if config.ClusterAdvertise != "" {
logrus.Warn(`The "cluster-advertise" option is deprecated. To be removed soon.`)
}
if config.ClusterStore != "" {
logrus.Warn(`The "cluster-store" option is deprecated. To be removed soon.`)
}
if len(config.ClusterOpts) > 0 {
logrus.Warn(`The "cluster-store-opt" option is deprecated. To be removed soon.`)
}
}
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{
GetSysInfo: func() *sysinfo.SysInfo {
return opts.daemon.RawSysInfo(true)
},
}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo(true).CgroupUnified),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
grpcBackends := []grpcrouter.Backend{}
for _, b := range []interface{}{opts.daemon, opts.buildBackend} {
if b, ok := b.(grpcrouter.Backend); ok {
grpcBackends = append(grpcBackends, b)
}
}
if len(grpcBackends) > 0 {
routers = append(routers, grpcrouter.NewRouter(grpcBackends...))
}
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
opts.api.InitRouter(routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error {
v := cfg.Version
exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental)
s.UseMiddleware(exp)
vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
s.UseMiddleware(vm)
if cfg.CorsHeaders != "" {
c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
s.UseMiddleware(c)
}
cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore)
cli.Config.AuthzMiddleware = cli.authzMiddleware
s.UseMiddleware(cli.authzMiddleware)
return nil
}
func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
opts, err := cli.getPlatformContainerdDaemonOpts()
if err != nil {
return nil, err
}
if cli.Config.Debug {
opts = append(opts, supervisor.WithLogLevel("debug"))
} else if cli.Config.LogLevel != "" {
opts = append(opts, supervisor.WithLogLevel(cli.Config.LogLevel))
}
if !cli.Config.CriContainerd {
opts = append(opts, supervisor.WithPlugin("cri", nil))
}
return opts, nil
}
func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) {
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
Version: dockerversion.Version,
CorsHeaders: cli.Config.CorsHeaders,
}
if cli.Config.TLS != nil && *cli.Config.TLS {
tlsOptions := tlsconfig.Options{
CAFile: cli.Config.CommonTLSOptions.CAFile,
CertFile: cli.Config.CommonTLSOptions.CertFile,
KeyFile: cli.Config.CommonTLSOptions.KeyFile,
ExclusiveRootPools: true,
}
if cli.Config.TLSVerify == nil || *cli.Config.TLSVerify {
// server requires and verifies client's certificate
tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert
}
tlsConfig, err := tlsconfig.Server(tlsOptions)
if err != nil {
return nil, err
}
serverConfig.TLSConfig = tlsConfig
}
if len(cli.Config.Hosts) == 0 {
cli.Config.Hosts = make([]string, 1)
}
return serverConfig, nil
}
// checkTLSAuthOK basically checks for an explicitly disabled TLS/TLSVerify.
// Going forward we do not want to support a scenario where dockerd listens
// on TCP without either TLS client auth or an explicit opt-in to disable it.
func checkTLSAuthOK(c *config.Config) bool {
if c.TLS == nil {
// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
// Or TLS is disabled by default... in any of these cases, we can just take the default value as to how to proceed
return DefaultTLSValue
}
if !*c.TLS {
// TLS is explicitly disabled, which is supported
return true
}
if c.TLSVerify == nil {
// this actually shouldn't happen since we set TLSVerify on the config object anyway
// But in case it does get here, be cautious and assume this is not supported.
return false
}
// Either TLSVerify is explicitly enabled or disabled, both cases are supported
return true
}
func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) {
var hosts []string
seen := make(map[string]struct{}, len(cli.Config.Hosts))
useTLS := DefaultTLSValue
if cli.Config.TLS != nil {
useTLS = *cli.Config.TLS
}
for i := 0; i < len(cli.Config.Hosts); i++ {
var err error
if cli.Config.Hosts[i], err = dopts.ParseHost(useTLS, honorXDG, cli.Config.Hosts[i]); err != nil {
return nil, errors.Wrapf(err, "error parsing -H %s", cli.Config.Hosts[i])
}
if _, ok := seen[cli.Config.Hosts[i]]; ok {
continue
}
seen[cli.Config.Hosts[i]] = struct{}{}
protoAddr := cli.Config.Hosts[i]
protoAddrParts := strings.SplitN(protoAddr, "://", 2)
if len(protoAddrParts) != 2 {
return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
}
proto := protoAddrParts[0]
addr := protoAddrParts[1]
// It's a bad idea to bind to TCP without tlsverify.
authEnabled := serverConfig.TLSConfig != nil && serverConfig.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert
if proto == "tcp" && !authEnabled {
logrus.WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
logrus.WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
time.Sleep(time.Second)
// If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
// We do not want to continue to support a default mode where tls verification is disabled, so we do some extra warnings here and eventually remove support
if !checkTLSAuthOK(cli.Config) {
ipAddr, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, errors.Wrap(err, "error parsing tcp address")
}
// shortcut all this extra stuff for literal "localhost"
// -H supports specifying hostnames; since we want to bypass this check on loopback interfaces, we look the host up here.
if ipAddr != "localhost" {
ip := net.ParseIP(ipAddr)
if ip == nil {
ipA, err := net.ResolveIPAddr("ip", ipAddr)
if err != nil {
logrus.WithError(err).WithField("host", ipAddr).Error("Error looking up specified host address")
}
if ipA != nil {
ip = ipA.IP
}
}
if ip == nil || !ip.IsLoopback() {
logrus.WithField("host", protoAddr).Warn("Binding to an IP address without --tlsverify is deprecated. Startup is intentionally being slowed down to show this message")
logrus.WithField("host", protoAddr).Warn("Please consider generating tls certificates with client validation to prevent exposing unauthenticated root access to your network")
logrus.WithField("host", protoAddr).Warnf("You can override this by explicitly specifying '--%s=false' or '--%s=false'", FlagTLS, FlagTLSVerify)
logrus.WithField("host", protoAddr).Warnf("Support for listening on TCP without authentication or explicit intent to run without authentication will be removed in the next release")
time.Sleep(15 * time.Second)
}
}
}
}
ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig)
if err != nil {
return nil, err
}
// If we're binding to a TCP port, make sure that a container doesn't try to use it.
if proto == "tcp" {
if err := allocateDaemonPort(addr); err != nil {
return nil, err
}
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
cli.api.Accept(addr, ls...)
}
return hosts, nil
}
func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) {
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
// A buffer allows store watch API and daemon processing to not wait for each other
watchStream := make(chan *swarmapi.WatchMessage, 32)
c, err := cluster.New(cluster.Config{
Root: cli.Config.Root,
Name: name,
Backend: d,
VolumeBackend: d.VolumesService(),
ImageBackend: d.ImageService(),
PluginBackend: d.PluginManager(),
NetworkSubnetsProvider: d,
DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr,
RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick,
RaftElectionTick: cli.Config.SwarmRaftElectionTick,
RuntimeRoot: cli.getSwarmRunRoot(),
WatchStream: watchStream,
})
if err != nil {
return nil, err
}
d.SetCluster(c)
err = c.Start()
return c, err
}
// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
// plugins present on the host and available to the daemon
func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
for _, reqPlugin := range requestedPlugins {
if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil {
return err
}
}
return nil
}
func systemContainerdRunning(honorXDG bool) (string, bool, error) {
addr := containerddefaults.DefaultAddress
if honorXDG {
runtimeDir, err := homedir.GetRuntimeDir()
if err != nil {
return "", false, err
}
addr = filepath.Join(runtimeDir, "containerd", "containerd.sock")
}
_, err := os.Lstat(addr)
return addr, err == nil, nil
}
// configureDaemonLogs sets the logrus logging level and formatting
func configureDaemonLogs(conf *config.Config) error {
if conf.LogLevel != "" {
lvl, err := logrus.ParseLevel(conf.LogLevel)
if err != nil {
return fmt.Errorf("unable to parse logging level: %s", conf.LogLevel)
}
logrus.SetLevel(lvl)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
DisableColors: conf.RawLogs,
FullTimestamp: true,
})
return nil
}
aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Discussed this with @tonistiigi and @tianon; for comparison, NGINX prints this on stderr:
```
nginx -t > /dev/null
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
```
| thaJeztah | 4,777 |
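The `--validate` path in the daemon code above takes the same approach: the "configuration OK" confirmation is written with `fmt.Fprintln(os.Stderr, ...)`, so redirecting stdout does not hide it. A rough illustration (hypothetical shell session; the config file path is only an example):
```
dockerd --validate --config-file=/etc/docker/daemon.json > /dev/null
configuration OK
```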
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
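As a rough usage sketch (hypothetical shell session; the config file paths are only examples and the error text is abbreviated), the daemon now loads and validates its configuration, reports the result on stderr, and returns without continuing startup, so the exit status can be used to check a config file before restarting the daemon:
```
# valid configuration: confirmation on stderr, zero exit status
dockerd --validate --config-file=/etc/docker/daemon.json
configuration OK

# invalid configuration: error on stderr, non-zero exit status
dockerd --validate --config-file=/etc/docker/daemon-broken.json
unable to configure the Docker daemon with file /etc/docker/daemon-broken.json: ...
```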
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In swarm the duplicates for labels are removed, so here we only accept
// duplicate keys that carry the same value; conflicting values are an error.
// If the key-value pair is repeated, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates labels and rejects conflicting values.
// In Swarm, duplicate labels are removed, so only identical values are
// accepted here; two different values for the same key are an error.
// If the same key-value pair is repeated, only one copy is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
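
// Illustrative sketch (hypothetical usage, based only on the function above):
// duplicate key-value pairs collapse, while two different values for one key fail.
//
//	labels, err := GetConflictFreeLabels([]string{"env=prod", "env=prod"})
//	// err == nil, labels == []string{"env=prod"}
//
//	_, err = GetConflictFreeLabels([]string{"env=prod", "env=dev"})
//	// err != nil: conflicting values for the "env" key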
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
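
// Illustrative sketch (the surrounding wiring is hypothetical; only Reload
// itself is defined above): the daemon typically calls Reload from a SIGHUP
// handler and applies the new configuration in the callback.
//
//	c := make(chan os.Signal, 1)
//	signal.Notify(c, syscall.SIGHUP)
//	go func() {
//		for range c {
//			if err := config.Reload(configFile, flags, func(newCfg *config.Config) {
//				// apply newCfg to the running daemon (hypothetical)
//			}); err != nil {
//				logrus.Error(err)
//			}
//		}
//	}()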
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
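
// Illustrative sketch (variable names are hypothetical): flag-derived settings
// are merged on top of the file configuration, and the result is validated
// both before and after the merge.
//
//	flagsConfig := config.New()
//	flagsConfig.Debug = true
//	cfg, err := config.MergeDaemonConfigurations(flagsConfig, flags, "/etc/docker/daemon.json")
//	if err != nil {
//		return err
//	}
//	// cfg now holds the daemon.json values with flag values merged on top.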
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
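
// Illustrative sketch: values for keys listed in flatOptions keep their nested
// map, anything else is flattened to its leaf keys. For instance
//
//	{"log-opts": {"max-size": "10m"}, "tls": true}
//
// yields flatten["log-opts"] == map[string]interface{}{"max-size": "10m"} and
// flatten["tls"] == true, while a hypothetical non-flat section
// {"foo": {"bar": 1}} would instead contribute flatten["bar"] == 1.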
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
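
// Illustrative sketch (hypothetical usage of the function above): a
// programmatically built configuration with an out-of-range value is rejected.
//
//	cfg := New()
//	bad := -1
//	cfg.MaxConcurrentDownloads = &bad
//	err := Validate(cfg)
//	// err: invalid max concurrent downloads: -1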
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Better to use `io.EOF` here instead of string matching
```suggestion
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil && err != io.EOF {
``` | thaJeztah | 4,778 |
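
A minimal sketch of the pattern suggested above (a hypothetical `decodeConfig` helper, assuming `encoding/json` and `io` are imported): compare the decoder error against the `io.EOF` sentinel so an empty payload is treated as "no content", instead of matching on the error text.

```go
// decodeConfig tolerates an empty configuration payload: io.EOF from the
// decoder means "no content"; any other error is still returned.
func decodeConfig(r io.Reader) (map[string]interface{}, error) {
	var cfg map[string]interface{}
	if err := json.NewDecoder(r).Decode(&cfg); err != nil && err != io.EOF {
		return nil, err
	}
	return cfg, nil
}
```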
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
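
A rough sketch of the flow this description implies (the `--validate` handling below is hypothetical glue code, not the actual daemon sources; only `MergeDaemonConfigurations` comes from the package shown in this row):

```go
// Hypothetical --validate handling in the daemon entrypoint.
cfg, err := config.MergeDaemonConfigurations(flagsConfig, flags, configFile)
if err != nil {
	return err // configuration failed to load or validate
}
if validateOnly {
	// Config loaded and validated; report success and return instead of
	// starting the daemon (and instead of calling os.Exit).
	fmt.Println("configuration OK")
	return nil
}
_ = cfg // continue with normal daemon startup
```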
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
	MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
	// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
	// to stop when the daemon is being shut down
	ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates labels and rejects conflicting values.
// In Swarm, duplicate labels are removed, so only identical values are
// accepted here; two different values for the same key are an error.
// If the same key-value pair is repeated, only one copy is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
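
// Illustrative note: with a daemon.json such as
//
//	{"debug": false, "labels": ["env=prod"]}
//
// and no --debug passed on the command line, the explicit "debug": false from
// the file is written into the flag set above, so the flag's default cannot
// override it (see the issue referenced in the comment). If --debug had also
// been passed explicitly, findConfigurationConflicts would reject the
// combination instead.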
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
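
// Illustrative sketch: starting dockerd with --max-concurrent-downloads=10
// while daemon.json also sets "max-concurrent-downloads": 3 produces an error
// along the lines of
//
//	the following directives are specified both as a flag and in the
//	configuration file: max-concurrent-downloads: (from flag: 10, from file: 3)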
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
	MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
	// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
	// to stop when the daemon is being shut down
	ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates labels and rejects conflicting values.
// In Swarm, duplicate labels are removed, so only identical values are
// accepted here; two different values for the same key are an error.
// If the same key-value pair is repeated, only one copy is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
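
// Note on this revision (illustrative sketch): an empty or whitespace-only
// configuration file is now accepted instead of failing to decode.
//
//	// daemon.json contains only a newline
//	cfg, err := getConflictFreeConfiguration(configFile, flags)
//	// err == nil, cfg is a zero-value Config; defaults are applied elsewhere.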
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Same here
```suggestion
if err := json.NewDecoder(reader).Decode(&config); err != nil && err != io.EOF {
``` | thaJeztah | 4,779 |
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
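// Illustrative calls (hypothetical label keys, same package assumed):
//
//	GetConflictFreeLabels([]string{"com.example.team=infra", "com.example.team=infra"})
//	// -> ["com.example.team=infra"], nil: exact duplicates collapse to one entry
//	GetConflictFreeLabels([]string{"com.example.team=infra", "com.example.team=web"})
//	// -> error "conflict labels for com.example.team=web and com.example.team=infra"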
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
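// Caller-side sketch (hypothetical path; flagsConfig and flags come from the CLI):
//
//	cfg, err := MergeDaemonConfigurations(flagsConfig, flags, "/etc/docker/daemon.json")
//	// the file is loaded and validated, flag/file conflicts are rejected,
//	// flag values are merged on top, and the merged result is validated again.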
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
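// Reading of the boolValue override above (hypothetical key): if daemon.json
// sets a boolean option to false while the matching flag carries a truthy
// default, copying the file value into the flag first keeps the explicit
// false from being resurrected when the two configurations are merged
// (see the linked issue 20289 for the original report).
//
//	// daemon.json: {"some-bool-option": false}   dockerd started without the flag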
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
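// Illustrative flattening (hypothetical input, same package assumed):
//
//	in := map[string]interface{}{
//		"debug":    true,
//		"log-opts": map[string]interface{}{"max-size": "10m"},
//	}
//	out := configValuesSet(in)
//	// out["debug"] == true; out["log-opts"] stays whole because "log-opts"
//	// is listed in flatOptions, while nested maps for other keys are
//	// flattened one level into their child keys.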
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
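// With the bytes.TrimSpace/len check above, an empty or whitespace-only
// daemon.json now yields a zero-value Config instead of a JSON decode error
// (hypothetical path, same package assumed):
//
//	cfg, err := getConflictFreeConfiguration("/etc/docker/daemon.json", flags)
//	// file contains only "\n" -> err == nil, cfg carries only defaults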
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
 | aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | what change caused this? Also, this ignores errors after the first JSON block. As the data is already in a byte slice, `Unmarshal` is cleaner. | tonistiigi | 4,780 |
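The review comment above points at a real `encoding/json` difference: a `Decoder` stops after the first JSON value, so trailing garbage in the config file would go unreported, while `Unmarshal` rejects it. A standalone sketch (illustrative only, not moby code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	var cfg map[string]interface{}
	payload := `{"debug": true} trailing garbage`

	// The Decoder reads the first value and leaves the rest unread: no error.
	fmt.Println(json.NewDecoder(strings.NewReader(payload)).Decode(&cfg)) // <nil>

	// Unmarshal requires the whole input to be a single JSON value.
	fmt.Println(json.Unmarshal([]byte(payload), &cfg)) // invalid character 't' after top-level value
}
```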
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138; avoids the `os.Exit` call.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
	// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values are kept
// here; conflicting values for the same key result in an error.
// If the key-value pairs are identical, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
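// Illustrative sketch added for clarity (not part of the original source):
// identical duplicate labels collapse into a single entry, while two
// different values for the same key are rejected. The label values are
// made-up examples.
func exampleGetConflictFreeLabels() {
	labels, _ := GetConflictFreeLabels([]string{"env=prod", "env=prod"})
	fmt.Println(labels) // [env=prod]
	_, err := GetConflictFreeLabels([]string{"env=prod", "env=dev"})
	fmt.Println(err) // error: conflicting values for the "env" key
}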
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
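// Illustrative sketch added for clarity (not part of the original source):
// a minimal merge of a temporary config file with a stand-in flag set. The
// file contents and flag registration are assumptions for illustration; the
// real dockerd flag set is much larger.
func exampleMergeDaemonConfigurations() error {
	f, err := ioutil.TempFile("", "daemon-config-*.json")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name())
	if _, err := f.Write([]byte(`{"debug": true}`)); err != nil {
		return err
	}
	_ = f.Close()
	flagsConfig := New()
	flags := pflag.NewFlagSet("dockerd", pflag.ContinueOnError)
	flags.BoolVar(&flagsConfig.Debug, "debug", false, "enable debug mode")
	merged, err := MergeDaemonConfigurations(flagsConfig, flags, f.Name())
	if err != nil {
		return err
	}
	fmt.Println(merged.Debug) // true: the value comes from the file
	return nil
}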
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
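// Illustrative sketch added for clarity (not part of the original source):
// nested sections are flattened to their inner keys unless the key is listed
// in flatOptions. "nested"/"inner" are made-up keys used only to show the
// flattening.
func exampleConfigValuesSet() {
	set := configValuesSet(map[string]interface{}{
		"log-opts": map[string]interface{}{"max-size": "10m"}, // flat option: kept whole
		"nested":   map[string]interface{}{"inner": "value"},  // not a flat option: flattened
	})
	_, hasLogOpts := set["log-opts"]
	_, hasInner := set["inner"]
	fmt.Println(hasLogOpts, hasInner) // true true
}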
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
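// Illustrative sketch added for clarity (not part of the original source):
// a key that is both changed on the command line and present in the
// configuration file is reported as a conflict. The flag set below is a
// minimal stand-in for the real dockerd flags.
func exampleFindConfigurationConflicts() {
	flags := pflag.NewFlagSet("dockerd", pflag.ContinueOnError)
	flags.String("pidfile", "", "path to the pid file")
	_ = flags.Set("pidfile", "/run/docker.pid") // simulates passing --pidfile on the CLI
	err := findConfigurationConflicts(map[string]interface{}{"pidfile": "/var/run/docker.pid"}, flags)
	fmt.Println(err) // "pidfile" is specified both as a flag and in the configuration file
}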
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
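// Illustrative sketch added for clarity (not part of the original source):
// Validate rejects out-of-range numeric settings, such as a non-positive
// max-download-attempts value.
func exampleValidate() {
	conf := New()
	attempts := 0
	conf.MaxDownloadAttempts = &attempts
	fmt.Println(Validate(conf)) // invalid max download attempts: 0
}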
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
	// SwarmRaftHeartbeatTick is the number of ticks between swarm-mode raft quorum heartbeats.
	// The typical value is 1.
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves.
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values are kept
// here; conflicting values for the same key result in an error.
// If the key-value pairs are identical, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
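// Illustrative sketch added for clarity (not part of the original source):
// Reload re-reads the given file and hands the freshly validated
// configuration to the callback, which is where the daemon applies
// reloadable options. The path below is an assumed example.
func exampleReload(flags *pflag.FlagSet) error {
	return Reload("/etc/docker/daemon.json", flags, func(c *Config) {
		fmt.Println("debug after reload:", c.Debug)
	})
}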
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
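// Illustrative sketch added for clarity (not part of the original source):
// with the trim/empty check above, a daemon.json that exists but is empty
// (or contains only whitespace) now yields a default Config instead of a
// JSON decoding error. The temporary file below is an assumed example.
func exampleEmptyConfigFile() error {
	f, err := ioutil.TempFile("", "daemon-empty-*.json")
	if err != nil {
		return err
	}
	defer os.Remove(f.Name())
	_ = f.Close() // the file is left empty on purpose
	conf, err := getConflictFreeConfiguration(f.Name(), nil)
	if err != nil {
		return err
	}
	fmt.Println(conf.Debug) // false: all fields keep their zero values
	return nil
}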
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | I think this was added to allow for an empty `daemon.json` file (empty file, not empty `{}`) | thaJeztah | 4,781 |
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoiding the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
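A minimal sketch of the intended flow, built only on the exported helpers of the `daemon/config` package; the flag wiring, the file path, and the "configuration OK" message are illustrative assumptions, not the actual cmd/dockerd implementation:

package main
import (
	"fmt"
	"github.com/docker/docker/daemon/config"
	"github.com/spf13/pflag"
)
// validateConfig loads, merges, and validates the daemon configuration, then
// returns without starting the daemon, which is what `dockerd --validate` is
// meant to do.
func validateConfig(configFile string, flags *pflag.FlagSet) error {
	if _, err := config.MergeDaemonConfigurations(config.New(), flags, configFile); err != nil {
		return err
	}
	fmt.Println("configuration OK")
	return nil
}
func main() {
	flags := pflag.NewFlagSet("dockerd", pflag.ContinueOnError)
	// Registration of the real daemon flags is omitted in this sketch.
	if err := validateConfig("/etc/docker/daemon.json", flags); err != nil {
		fmt.Println(err)
	}
}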
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
	// SwarmRaftHeartbeatTick is the number of ticks between swarm-mode raft quorum heartbeats.
	// The typical value is 1.
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves.
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values are kept
// here; conflicting values for the same key result in an error.
// If the key-value pairs are identical, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
	// NetworkControlPlaneMTU allows specifying the control plane MTU; this can help optimize network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves.
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
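// Illustrative sketch (added for documentation, not part of the original file):
// a Config produced by New carries only empty defaults, so it is expected to
// pass Validate; real callers layer file and flag values on top of it.
func exampleValidateDefaultConfig() {
	cfg := New()
	if err := Validate(cfg); err != nil {
		fmt.Println("unexpected validation error:", err)
		return
	}
	fmt.Println("default config is valid")
}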
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
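// Illustrative sketch (added for documentation, not part of the original file):
// the two error paths of ParseClusterAdvertiseSettings. An empty advertise
// address reports the deprecated discovery feature as disabled, and an
// advertise address without a cluster store is rejected.
func exampleParseClusterAdvertise() {
	if _, err := ParseClusterAdvertiseSettings("", ""); err == daemondiscovery.ErrDiscoveryDisabled {
		fmt.Println("discovery is disabled")
	}
	if _, err := ParseClusterAdvertiseSettings("", "192.168.0.10:2376"); err != nil {
		fmt.Println("advertise without store:", err)
	}
}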
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values for the
// same key are accepted here; conflicting values return an error.
// If the key-value pair is repeated verbatim, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
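// Illustrative sketch (added for documentation, not part of the original file):
// identical duplicates collapse into a single label, while the same key with
// two different values is reported as a conflict.
func exampleConflictFreeLabels() {
	merged, err := GetConflictFreeLabels([]string{"com.example.env=prod", "com.example.env=prod"})
	fmt.Println(merged, err) // [com.example.env=prod] <nil>
	_, err = GetConflictFreeLabels([]string{"com.example.env=prod", "com.example.env=test"})
	fmt.Println(err) // reports conflicting values for com.example.env
}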
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
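// Illustrative sketch (added for documentation, not part of the original file):
// how a caller might invoke Reload from a SIGHUP handler. The flag set and
// config path are placeholders; the daemon passes its own parsed flags and an
// apply callback that swaps the new configuration in.
func exampleReloadOnSignal(flags *pflag.FlagSet, configFile string, apply func(*Config)) {
	if err := Reload(configFile, flags, apply); err != nil {
		logrus.WithError(err).Error("reloading daemon configuration failed")
	}
}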
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
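// Illustrative sketch (added for documentation, not part of the original file):
// the startup flow merges the flag-derived Config on top of the file on disk.
// Note that the real daemon registers every supported flag on the flag set
// first; with an empty set like the one below, any key found in the file
// would be rejected as unknown.
func exampleMergeConfigurations(configFile string) (*Config, error) {
	flags := pflag.NewFlagSet("daemon-sketch", pflag.ContinueOnError)
	return MergeDaemonConfigurations(New(), flags, configFile)
}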
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
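// Illustrative sketch (added for documentation, not part of the original file):
// nested objects such as the TLS options are flattened to their leaf keys,
// while keys listed in flatOptions (for example "log-opts") are kept whole.
func exampleConfigValuesSet() {
	set := configValuesSet(map[string]interface{}{
		"debug":    true,
		"log-opts": map[string]interface{}{"max-size": "10m"},
	})
	fmt.Println(set["debug"], set["log-opts"]) // true map[max-size:10m]
}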
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
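// Illustrative sketch (added for documentation, not part of the original file):
// a key that is both changed on the command line and present in the
// configuration file is reported as a conflict.
func exampleFindConfigurationConflicts() {
	flags := pflag.NewFlagSet("sketch", pflag.ContinueOnError)
	flags.Bool("debug", false, "")
	_ = flags.Set("debug", "true")
	err := findConfigurationConflicts(map[string]interface{}{"debug": true}, flags)
	fmt.Println(err) // reports that "debug" is set both as a flag and in the file
}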
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
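// Illustrative sketch (added for documentation, not part of the original file):
// Validate rejects a negative max-concurrent-downloads value.
func exampleValidateRejectsNegativeDownloads() {
	cfg := New()
	invalid := -1
	cfg.MaxConcurrentDownloads = &invalid
	fmt.Println(Validate(cfg)) // invalid max concurrent downloads: -1
}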
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
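// Illustrative sketch (added for documentation, not part of the original file):
// a nil MaxDownloadAttempts means "use the default", while zero or negative
// values are rejected.
func exampleValidateMaxDownloadAttempts() {
	cfg := New()
	fmt.Println(ValidateMaxDownloadAttempts(cfg)) // <nil>
	zero := 0
	cfg.MaxDownloadAttempts = &zero
	fmt.Println(ValidateMaxDownloadAttempts(cfg)) // invalid max download attempts: 0
}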
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
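// Illustrative sketch (added for documentation, not part of the original file):
// changing the (deprecated) cluster store backend or advertise address counts
// as a modification, while comparing two empty option sets does not.
func exampleModifiedDiscoverySettings() {
	cfg := New()
	fmt.Println(ModifiedDiscoverySettings(cfg, "consul://localhost:8500", "", nil)) // true
	fmt.Println(ModifiedDiscoverySettings(cfg, "", "", nil))                        // false
}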
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Not sure if this is a good idea. But if added should be an explicit empty check then. Tbh I'm surprised this returns `io.EOF` and not `ErrUnexpectedEOF`. | tonistiigi | 4,782 |
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
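As a rough sketch of the intended flow (hypothetical code, the helper name and wiring are illustrative only), daemon startup would stop right after the merged configuration validates:

	func validateConfigOnly(flagsConfig *config.Config, flags *pflag.FlagSet, configFile string) error {
		if _, err := config.MergeDaemonConfigurations(flagsConfig, flags, configFile); err != nil {
			return err
		}
		fmt.Println("configuration OK")
		return nil
	}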
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
	// NetworkControlPlaneMTU allows specifying the control plane MTU; this can help optimize network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves.
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
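// Illustrative sketch (added for documentation, not part of the original file):
// callers use IsValueSet to tell "explicitly set in the config file" apart
// from "left at its zero value", for example before applying a default.
func exampleIsValueSet(cfg *Config) {
	if cfg.IsValueSet("max-concurrent-downloads") {
		fmt.Println("max-concurrent-downloads was set in the configuration file")
	}
}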
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values for the
// same key are accepted here; conflicting values return an error.
// If the key-value pair is repeated verbatim, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs,
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
	// NetworkControlPlaneMTU allows specifying the control plane MTU; this can help optimize network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
	// not starting it ourselves.
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflicts.
// In Swarm, duplicate labels are removed, so only identical values for the
// same key are accepted here; conflicting values return an error.
// If the key-value pair is repeated verbatim, only the last label is kept.
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | I think it would return `ErrUnexpectedEOF` if the JSON is incomplete (`{`), and `EOF` if the file is empty (or only whitespace);
https://play.golang.org/p/1_N1QidcM7T
```go
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
)
func main() {
var reader io.Reader
var jsonConfig map[string]interface{}
for _, config := range []string{"{", "[", `""`, "", " ", `[]`, `{}`} {
reader = bytes.NewReader([]byte(config))
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
fmt.Println(err)
continue
}
fmt.Println(config, "is ok")
}
}
```
Prints:
```
unexpected EOF
unexpected EOF
json: cannot unmarshal string into Go value of type map[string]interface {}
EOF
EOF
json: cannot unmarshal array into Go value of type map[string]interface {}
{} is ok
```
> Not sure if this is a good idea
I thought slightly relaxing this would be OK; we already allow an empty `{}`, so an empty file could be considered the equivalent (a config with nothing set).
Do you have specific concerns? | thaJeztah | 4,783 |
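To make the relaxation under discussion concrete, here is a minimal, self-contained sketch of treating an empty or whitespace-only file as an empty configuration while still rejecting truncated JSON. This is illustrative only — `loadConfig` is a made-up helper for this example, not the daemon's actual code:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// loadConfig is a hypothetical helper (not the daemon's real function):
// it treats an empty or whitespace-only payload as an empty configuration
// and only calls json.Unmarshal when there is actual content to parse.
func loadConfig(b []byte) (map[string]interface{}, error) {
	config := map[string]interface{}{}
	b = bytes.TrimSpace(b)
	if len(b) == 0 {
		// empty file: treat it the same as "{}" (nothing set)
		return config, nil
	}
	if err := json.Unmarshal(b, &config); err != nil {
		return nil, err
	}
	return config, nil
}

func main() {
	for _, payload := range []string{"", "   ", "{}", `{"debug": true}`, "{"} {
		cfg, err := loadConfig([]byte(payload))
		if err != nil {
			fmt.Printf("%q -> error: %v\n", payload, err)
			continue
		}
		fmt.Printf("%q -> ok, %d key(s) set\n", payload, len(cfg))
	}
}
```

It mirrors the `bytes.TrimSpace` / `len(b) == 0` check that `getConflictFreeConfiguration` gained in the patch shown above.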
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
	// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
	// ContainerdAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Yeah, idk why empty input and whitespace are considered valid JSON for `Decode`. `JSON.parse` in JS throws an unexpected-EOF error for both cases.
I'm not strictly against allowing an empty file, if the detection for it is clear in the code. It might lead to an error going unnoticed, though. If Desktop fills this file from a field, I do think the correct behavior would be to not create a file at all when the field is empty, rather than to create an empty file. | tonistiigi | 4,784
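For reference on the EOF discussion: the patch above replaces `json.Decoder.Decode` with `json.Unmarshal` (plus an explicit empty-file check), and the two report empty input differently. A small standalone snippet — illustrative only, not part of the PR — contrasting the two:

```go
package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
)

func main() {
	var m map[string]interface{}

	// json.Decoder reports plain io.EOF when the input is empty.
	err := json.NewDecoder(bytes.NewReader(nil)).Decode(&m)
	fmt.Println("Decode on empty input:", err, "| is io.EOF:", errors.Is(err, io.EOF))

	// json.Unmarshal reports a *json.SyntaxError ("unexpected end of JSON input") instead.
	err = json.Unmarshal([]byte(""), &m)
	var syntaxErr *json.SyntaxError
	fmt.Println("Unmarshal on empty input:", err, "| is SyntaxError:", errors.As(err, &syntaxErr))
}
```

Either way, a loader that wants to accept an empty file has to special-case it before parsing, which is what the new `bytes.TrimSpace` check does.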
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138, avoids the `os.Exit`.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | daemon/config/config.go | package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
var reader io.Reader
if flags != nil {
var jsonConfig map[string]interface{}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
reader = bytes.NewReader(b)
if err := json.NewDecoder(reader).Decode(&config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| package config // import "github.com/docker/docker/daemon/config"
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"strings"
"sync"
daemondiscovery "github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/authorization"
"github.com/docker/docker/pkg/discovery"
"github.com/docker/docker/registry"
"github.com/imdario/mergo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
const (
// DefaultMaxConcurrentDownloads is the default value for
// maximum number of downloads that
// may take place at a time for each pull.
DefaultMaxConcurrentDownloads = 3
// DefaultMaxConcurrentUploads is the default value for
// maximum number of uploads that
// may take place at a time for each push.
DefaultMaxConcurrentUploads = 5
// DefaultDownloadAttempts is the default value for
// maximum number of attempts that
// may take place at a time for each pull when the connection is lost.
DefaultDownloadAttempts = 5
// DefaultShmSize is the default value for container's shm size
DefaultShmSize = int64(67108864)
// DefaultNetworkMtu is the default value for network MTU
DefaultNetworkMtu = 1500
// DisableNetworkBridge is the default value of the option to disable network bridge
DisableNetworkBridge = "none"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "runc"
// StockRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
StockRuntimeName = "runc"
// LinuxV1RuntimeName is the runtime used to specify the containerd v1 shim with the runc binary
// Note this is different than io.containerd.runc.v1 which would be the v1 shim using the v2 shim API.
// This is specifically for the v1 shim using the v1 shim API.
LinuxV1RuntimeName = "io.containerd.runtime.v1.linux"
// LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim
LinuxV2RuntimeName = "io.containerd.runc.v2"
)
var builtinRuntimes = map[string]bool{
StockRuntimeName: true,
LinuxV1RuntimeName: true,
LinuxV2RuntimeName: true,
}
// flatOptions contains configuration keys
// that MUST NOT be parsed as deep structures.
// Use this to differentiate these options
// with others like the ones in CommonTLSOptions.
var flatOptions = map[string]bool{
"cluster-store-opts": true,
"log-opts": true,
"runtimes": true,
"default-ulimits": true,
"features": true,
"builder": true,
}
// skipValidateOptions contains configuration keys
// that will be skipped from findConfigurationConflicts
// for unknown flag validation.
var skipValidateOptions = map[string]bool{
"features": true,
"builder": true,
// Corresponding flag has been removed because it was already unusable
"deprecated-key-path": true,
}
// skipDuplicates contains configuration keys that
// will be skipped when checking duplicated
// configuration field defined in both daemon
// config file and from dockerd cli flags.
// This allows some configurations to be merged
// during the parsing.
var skipDuplicates = map[string]bool{
"runtimes": true,
}
// LogConfig represents the default log configuration.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type LogConfig struct {
Type string `json:"log-driver,omitempty"`
Config map[string]string `json:"log-opts,omitempty"`
}
// commonBridgeConfig stores all the platform-common bridge driver specific
// configuration.
type commonBridgeConfig struct {
Iface string `json:"bridge,omitempty"`
FixedCIDR string `json:"fixed-cidr,omitempty"`
}
// NetworkConfig stores the daemon-wide networking configurations
type NetworkConfig struct {
// Default address pools for docker networks
DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"`
// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
}
// CommonTLSOptions defines TLS configuration for the daemon server.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonTLSOptions struct {
CAFile string `json:"tlscacert,omitempty"`
CertFile string `json:"tlscert,omitempty"`
KeyFile string `json:"tlskey,omitempty"`
}
// DNSConfig defines the DNS configurations.
type DNSConfig struct {
DNS []string `json:"dns,omitempty"`
DNSOptions []string `json:"dns-opts,omitempty"`
DNSSearch []string `json:"dns-search,omitempty"`
HostGatewayIP net.IP `json:"host-gateway-ip,omitempty"`
}
// CommonConfig defines the configuration of a docker daemon which is
// common across platforms.
// It includes json tags to deserialize configuration from a file
// using the same names that the flags in the command line use.
type CommonConfig struct {
AuthzMiddleware *authorization.Middleware `json:"-"`
AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins
AutoRestart bool `json:"-"`
Context map[string][]string `json:"-"`
DisableBridge bool `json:"-"`
ExecOptions []string `json:"exec-opts,omitempty"`
GraphDriver string `json:"storage-driver,omitempty"`
GraphOptions []string `json:"storage-opts,omitempty"`
Labels []string `json:"labels,omitempty"`
Mtu int `json:"mtu,omitempty"`
NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"`
Pidfile string `json:"pidfile,omitempty"`
RawLogs bool `json:"raw-logs,omitempty"`
RootDeprecated string `json:"graph,omitempty"`
Root string `json:"data-root,omitempty"`
ExecRoot string `json:"exec-root,omitempty"`
SocketGroup string `json:"group,omitempty"`
CorsHeaders string `json:"api-cors-header,omitempty"`
// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
// when pushing to a registry which does not support schema 2. This field is marked as
// deprecated because schema 1 manifests are deprecated in favor of schema 2 and the
// daemon ID will use a dedicated identifier not shared with exported signatures.
TrustKeyPath string `json:"deprecated-key-path,omitempty"`
// LiveRestoreEnabled determines whether we should keep containers
// alive upon daemon shutdown/start
LiveRestoreEnabled bool `json:"live-restore,omitempty"`
// ClusterStore is the storage backend used for the cluster information. It is used by both
// multihost networking (to store networks and endpoints information) and by the node discovery
// mechanism.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterStore string `json:"cluster-store,omitempty"`
// ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such
// as TLS configuration settings.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"`
// ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node
// discovery. This should be a 'host:port' combination on which that daemon instance is
// reachable by other hosts.
// Deprecated: host-discovery and overlay networks with external k/v stores are deprecated
ClusterAdvertise string `json:"cluster-advertise,omitempty"`
// MaxConcurrentDownloads is the maximum number of downloads that
// may take place at a time for each pull.
MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"`
// MaxConcurrentUploads is the maximum number of uploads that
// may take place at a time for each push.
MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"`
// MaxDownloadAttempts is the maximum number of attempts that
// may take place at a time for each pull.
MaxDownloadAttempts *int `json:"max-download-attempts,omitempty"`
// ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container
// to stop when daemon is being shutdown
ShutdownTimeout int `json:"shutdown-timeout,omitempty"`
Debug bool `json:"debug,omitempty"`
Hosts []string `json:"hosts,omitempty"`
LogLevel string `json:"log-level,omitempty"`
TLS *bool `json:"tls,omitempty"`
TLSVerify *bool `json:"tlsverify,omitempty"`
// Embedded structs that allow config
// deserialization without the full struct.
CommonTLSOptions
// SwarmDefaultAdvertiseAddr is the default host/IP or network interface
// to use if a wildcard address is specified in the ListenAddr value
// given to the /swarm/init endpoint and no advertise address is
// specified.
SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"`
// SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat
// Typical value is 1
SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"`
// SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose
// a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick.
// Higher values can make the quorum less sensitive to transient faults in the environment, but this also
// means it takes longer for the managers to detect a down leader.
SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"`
MetricsAddress string `json:"metrics-addr"`
DNSConfig
LogConfig
BridgeConfig // bridgeConfig holds bridge network specific configuration.
NetworkConfig
registry.ServiceOptions
sync.Mutex
// FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags
// It should probably be handled outside this package.
ValuesSet map[string]interface{} `json:"-"`
Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
// Exposed node Generic Resources
// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
NodeGenericResources []string `json:"node-generic-resources,omitempty"`
// ContainerAddr is the address used to connect to containerd if we're
// not starting it ourselves
ContainerdAddr string `json:"containerd,omitempty"`
// CriContainerd determines whether a supervised containerd instance
// should be configured with the CRI plugin enabled. This allows using
// Docker's containerd instance directly with a Kubernetes kubelet.
CriContainerd bool `json:"cri-containerd,omitempty"`
// Features contains a list of feature key value pairs indicating what features are enabled or disabled.
// If a certain feature doesn't appear in this list then it's unset (i.e. neither true nor false).
Features map[string]bool `json:"features,omitempty"`
Builder BuilderConfig `json:"builder,omitempty"`
ContainerdNamespace string `json:"containerd-namespace,omitempty"`
ContainerdPluginNamespace string `json:"containerd-plugin-namespace,omitempty"`
}
// IsValueSet returns true if a configuration value
// was explicitly set in the configuration file.
func (conf *Config) IsValueSet(name string) bool {
if conf.ValuesSet == nil {
return false
}
_, ok := conf.ValuesSet[name]
return ok
}
// New returns a new fully initialized Config struct
func New() *Config {
config := Config{}
config.LogConfig.Config = make(map[string]string)
config.ClusterOpts = make(map[string]string)
return &config
}
// ParseClusterAdvertiseSettings parses the specified advertise settings
func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
if clusterAdvertise == "" {
return "", daemondiscovery.ErrDiscoveryDisabled
}
if clusterStore == "" {
return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration")
}
advertise, err := discovery.ParseAdvertise(clusterAdvertise)
if err != nil {
return "", errors.Wrap(err, "discovery advertise parsing failed")
}
return advertise, nil
}
// GetConflictFreeLabels validates Labels for conflict
// In swarm the duplicates for labels are removed
// so we only take same values here, no conflict values
// If the key-value is the same we will only take the last label
func GetConflictFreeLabels(labels []string) ([]string, error) {
labelMap := map[string]string{}
for _, label := range labels {
stringSlice := strings.SplitN(label, "=", 2)
if len(stringSlice) > 1 {
// If there is a conflict we will return an error
if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
}
labelMap[stringSlice[0]] = stringSlice[1]
}
}
newLabels := []string{}
for k, v := range labelMap {
newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
}
return newLabels, nil
}
// Reload reads the configuration in the host and reloads the daemon and server.
func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error {
logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
newConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
if flags.Changed("config-file") || !os.IsNotExist(err) {
return errors.Wrapf(err, "unable to configure the Docker daemon with file %s", configFile)
}
newConfig = New()
}
if err := Validate(newConfig); err != nil {
return errors.Wrap(err, "file configuration validation failed")
}
// Check if duplicate label-keys with different values are found
newLabels, err := GetConflictFreeLabels(newConfig.Labels)
if err != nil {
return err
}
newConfig.Labels = newLabels
reload(newConfig)
return nil
}
// boolValue is an interface that boolean value flags implement
// to tell the command line how to make -name equivalent to -name=true.
type boolValue interface {
IsBoolFlag() bool
}
// MergeDaemonConfigurations reads a configuration file,
// loads the file configuration in an isolated structure,
// and merges the configuration provided from flags on top
// if there are no conflicts.
func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) {
fileConfig, err := getConflictFreeConfiguration(configFile, flags)
if err != nil {
return nil, err
}
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "configuration validation from file failed")
}
// merge flags configuration on top of the file configuration
if err := mergo.Merge(fileConfig, flagsConfig); err != nil {
return nil, err
}
// We need to validate again once both fileConfig and flagsConfig
// have been merged
if err := Validate(fileConfig); err != nil {
return nil, errors.Wrap(err, "merged configuration validation from file and command line flags failed")
}
return fileConfig, nil
}
// getConflictFreeConfiguration loads the configuration from a JSON file.
// It compares that configuration with the one provided by the flags,
// and returns an error if there are conflicts.
func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) {
b, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, err
}
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
if flags != nil {
var jsonConfig map[string]interface{}
if err := json.Unmarshal(b, &jsonConfig); err != nil {
return nil, err
}
configSet := configValuesSet(jsonConfig)
if err := findConfigurationConflicts(configSet, flags); err != nil {
return nil, err
}
// Override flag values to make sure the values set in the config file with nullable values, like `false`,
// are not overridden by default truthy values from the flags that were not explicitly set.
// See https://github.com/docker/docker/issues/20289 for an example.
//
// TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers.
namedOptions := make(map[string]interface{})
for key, value := range configSet {
f := flags.Lookup(key)
if f == nil { // ignore named flags that don't match
namedOptions[key] = value
continue
}
if _, ok := f.Value.(boolValue); ok {
f.Value.Set(fmt.Sprintf("%v", value))
}
}
if len(namedOptions) > 0 {
// set also default for mergeVal flags that are boolValue at the same time.
flags.VisitAll(func(f *pflag.Flag) {
if opt, named := f.Value.(opts.NamedOption); named {
v, set := namedOptions[opt.Name()]
_, boolean := f.Value.(boolValue)
if set && boolean {
f.Value.Set(fmt.Sprintf("%v", v))
}
}
})
}
config.ValuesSet = configSet
}
if err := json.Unmarshal(b, &config); err != nil {
return nil, err
}
if config.RootDeprecated != "" {
logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
if config.Root != "" {
return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
}
config.Root = config.RootDeprecated
}
return &config, nil
}
// configValuesSet returns the configuration values explicitly set in the file.
func configValuesSet(config map[string]interface{}) map[string]interface{} {
flatten := make(map[string]interface{})
for k, v := range config {
if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
for km, vm := range m {
flatten[km] = vm
}
continue
}
flatten[k] = v
}
return flatten
}
// findConfigurationConflicts iterates over the provided flags searching for
// duplicated configurations and unknown keys. It returns an error with all the conflicts if
// it finds any.
func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error {
// 1. Search keys from the file that we don't recognize as flags.
unknownKeys := make(map[string]interface{})
for key, value := range config {
if flag := flags.Lookup(key); flag == nil && !skipValidateOptions[key] {
unknownKeys[key] = value
}
}
// 2. Discard values that implement NamedOption.
// Their configuration name differs from their flag name, like `labels` and `label`.
if len(unknownKeys) > 0 {
unknownNamedConflicts := func(f *pflag.Flag) {
if namedOption, ok := f.Value.(opts.NamedOption); ok {
delete(unknownKeys, namedOption.Name())
}
}
flags.VisitAll(unknownNamedConflicts)
}
if len(unknownKeys) > 0 {
var unknown []string
for key := range unknownKeys {
unknown = append(unknown, key)
}
return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
}
var conflicts []string
printConflict := func(name string, flagValue, fileValue interface{}) string {
return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue)
}
// 3. Search keys that are present as a flag and as a file option.
duplicatedConflicts := func(f *pflag.Flag) {
// search option name in the json configuration payload if the value is a named option
if namedOption, ok := f.Value.(opts.NamedOption); ok {
if optsValue, ok := config[namedOption.Name()]; ok && !skipDuplicates[namedOption.Name()] {
conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue))
}
} else {
// search flag name in the json configuration payload
for _, name := range []string{f.Name, f.Shorthand} {
if value, ok := config[name]; ok && !skipDuplicates[name] {
conflicts = append(conflicts, printConflict(name, f.Value.String(), value))
break
}
}
}
}
flags.Visit(duplicatedConflicts)
if len(conflicts) > 0 {
return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
}
return nil
}
// Validate validates some specific configs.
// such as config.DNS, config.Labels, config.DNSSearch,
// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
func Validate(config *Config) error {
// validate DNS
for _, dns := range config.DNS {
if _, err := opts.ValidateIPAddress(dns); err != nil {
return err
}
}
// validate DNSSearch
for _, dnsSearch := range config.DNSSearch {
if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil {
return err
}
}
// validate Labels
for _, label := range config.Labels {
if _, err := opts.ValidateLabel(label); err != nil {
return err
}
}
// validate MaxConcurrentDownloads
if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 {
return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads)
}
// validate MaxConcurrentUploads
if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 {
return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads)
}
if err := ValidateMaxDownloadAttempts(config); err != nil {
return err
}
// validate that "default" runtime is not reset
if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
if _, ok := runtimes[StockRuntimeName]; ok {
return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
}
}
if _, err := ParseGenericResources(config.NodeGenericResources); err != nil {
return err
}
if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
if !builtinRuntimes[defaultRuntime] {
runtimes := config.GetAllRuntimes()
if _, ok := runtimes[defaultRuntime]; !ok {
return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
}
}
}
// validate platform-specific settings
return config.ValidatePlatformConfig()
}
// ValidateMaxDownloadAttempts validates if the max-download-attempts is within the valid range
func ValidateMaxDownloadAttempts(config *Config) error {
if config.MaxDownloadAttempts != nil && *config.MaxDownloadAttempts <= 0 {
return fmt.Errorf("invalid max download attempts: %d", *config.MaxDownloadAttempts)
}
return nil
}
// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not.
func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool {
if config.ClusterStore != backendType || config.ClusterAdvertise != advertise {
return true
}
if (config.ClusterOpts == nil && clusterOpts == nil) ||
(config.ClusterOpts == nil && len(clusterOpts) == 0) ||
(len(config.ClusterOpts) == 0 && clusterOpts == nil) {
return false
}
return !reflect.DeepEqual(config.ClusterOpts, clusterOpts)
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | I'm inclined to do the reverse here; not convert "empty" to `{}`, but check if it's empty, and if so, return early:
```suggestion
var config Config
b = bytes.TrimSpace(b)
if len(b) == 0 {
// empty config file
return &config, nil
}
```
Looks like all the code below should only be needed for loading the JSON file and for validating whether there are conflicts, so we wouldn't have to run that code, afaics (yeah, this code and logic needs a rewrite, as it's quite messy).
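As a rough illustration, the early-return behaviour could be exercised with a test along these lines in the same package (the test name and details here are illustrative, not necessarily what the PR adds):

```go
package config

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestEmptyConfigFileIsAccepted(t *testing.T) {
	for _, contents := range []string{"", "  \n\t  "} {
		f, err := ioutil.TempFile("", "docker-config-")
		if err != nil {
			t.Fatal(err)
		}
		defer os.Remove(f.Name())

		if _, err := f.WriteString(contents); err != nil {
			t.Fatal(err)
		}
		f.Close()

		// With the early return above, an empty or whitespace-only file should
		// produce a zero-value Config instead of a JSON decoding error.
		cfg, err := getConflictFreeConfiguration(f.Name(), nil)
		if err != nil {
			t.Fatalf("contents %q: unexpected error: %v", contents, err)
		}
		if cfg == nil {
			t.Fatalf("contents %q: expected a non-nil config", contents)
		}
	}
}
```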
moby/moby | 42,393 | Daemon config validation | On `dockerd --validate`, return after the daemon config has been loaded and validated.
Continuation of https://github.com/moby/moby/pull/38138; this avoids the `os.Exit` call.
closes https://github.com/moby/moby/pull/38138
fixes https://github.com/moby/moby/issues/36911
| null | 2021-05-19 09:56:56+00:00 | 2021-06-23 17:32:07+00:00 | testutil/daemon/daemon.go | package daemon // import "github.com/docker/docker/testutil/daemon"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/testutil/request"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
)
// LogT is the subset of the testing.TB interface used by the daemon.
type LogT interface {
Logf(string, ...interface{})
}
// nopLog is a no-op implementation of LogT that is used in daemons created by
// NewDaemon (where no testing.TB is available).
type nopLog struct{}
func (nopLog) Logf(string, ...interface{}) {}
const (
defaultDockerdBinary = "dockerd"
defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock"
defaultDockerdRootlessBinary = "dockerd-rootless.sh"
)
var errDaemonNotStarted = errors.New("daemon not started")
// SockRoot holds the path of the default docker integration daemon socket
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
type clientConfig struct {
transport *http.Transport
scheme string
addr string
}
// Daemon represents a Docker daemon for the testing framework
type Daemon struct {
Root string
Folder string
Wait chan error
UseDefaultHost bool
UseDefaultTLSHost bool
id string
logFile *os.File
cmd *exec.Cmd
storageDriver string
userlandProxy bool
defaultCgroupNamespaceMode string
execRoot string
experimental bool
init bool
dockerdBinary string
log LogT
pidFile string
args []string
containerdSocket string
rootlessUser *user.User
rootlessXDGRuntimeDir string
// swarm related field
swarmListenAddr string
SwarmPort int // FIXME(vdemeester) should probably not be exported
DefaultAddrPool []string
SubnetSize uint32
DataPathPort uint32
OOMScoreAdjust int
// cached information
CachedInfo types.Info
}
// NewDaemon returns a Daemon instance to be used for testing.
// The daemon will not automatically start.
// The daemon will modify and create files under workingDir.
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
if err := os.MkdirAll(SockRoot, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot)
}
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
dir := filepath.Join(workingDir, id)
daemonFolder, err := filepath.Abs(dir)
if err != nil {
return nil, err
}
daemonRoot := filepath.Join(daemonFolder, "root")
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot)
}
userlandProxy := true
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
if val, err := strconv.ParseBool(env); err == nil {
userlandProxy = val
}
}
d := &Daemon{
id: id,
Folder: daemonFolder,
Root: daemonRoot,
storageDriver: storageDriver,
userlandProxy: userlandProxy,
// dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation)
execRoot: filepath.Join(os.TempDir(), "dxr", id),
dockerdBinary: defaultDockerdBinary,
swarmListenAddr: defaultSwarmListenAddr,
SwarmPort: DefaultSwarmPort,
log: nopLog{},
containerdSocket: defaultContainerdSocket,
}
for _, op := range ops {
op(d)
}
if d.rootlessUser != nil {
if err := os.Chmod(SockRoot, 0777); err != nil {
return nil, err
}
uid, err := strconv.Atoi(d.rootlessUser.Uid)
if err != nil {
return nil, err
}
gid, err := strconv.Atoi(d.rootlessUser.Gid)
if err != nil {
return nil, err
}
if err := os.Chown(d.Folder, uid, gid); err != nil {
return nil, err
}
if err := os.Chown(d.Root, uid, gid); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil {
return nil, err
}
if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil {
return nil, err
}
if err := os.MkdirAll(d.execRoot, 0700); err != nil {
return nil, err
}
if err := os.Chown(d.execRoot, uid, gid); err != nil {
return nil, err
}
d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun")
if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil {
return nil, err
}
if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil {
return nil, err
}
d.containerdSocket = ""
}
return d, nil
}
// New returns a Daemon instance to be used for testing.
// This will create a directory such as d123456789 in the folder specified by
// $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
// The daemon will not automatically start.
func New(t testing.TB, ops ...Option) *Daemon {
t.Helper()
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
dest = filepath.Join(dest, t.Name())
assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
if os.Getenv("DOCKER_ROOTLESS") != "" {
if os.Getenv("DOCKER_REMAP_ROOT") != "" {
t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently")
}
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
if val, err := strconv.ParseBool(env); err == nil && !val {
t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false")
}
}
ops = append(ops, WithRootlessUser("unprivilegeduser"))
}
ops = append(ops, WithOOMScoreAdjust(-500))
d, err := NewDaemon(dest, ops...)
assert.NilError(t, err, "could not create daemon at %q", dest)
if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary {
t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary)
}
return d
}
// ContainersNamespace returns the containerd namespace used for containers.
func (d *Daemon) ContainersNamespace() string {
return d.id
}
// RootDir returns the root directory of the daemon.
func (d *Daemon) RootDir() string {
return d.Root
}
// ID returns the generated id of the daemon
func (d *Daemon) ID() string {
return d.id
}
// StorageDriver returns the configured storage driver of the daemon
func (d *Daemon) StorageDriver() string {
return d.storageDriver
}
// Sock returns the socket path of the daemon
func (d *Daemon) Sock() string {
return fmt.Sprintf("unix://" + d.sockPath())
}
func (d *Daemon) sockPath() string {
return filepath.Join(SockRoot, d.id+".sock")
}
// LogFileName returns the path the daemon's log file
func (d *Daemon) LogFileName() string {
return d.logFile.Name()
}
// ReadLogFile returns the content of the daemon log file
func (d *Daemon) ReadLogFile() ([]byte, error) {
return ioutil.ReadFile(d.logFile.Name())
}
// NewClientT creates new client based on daemon's socket path
func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client {
t.Helper()
c, err := d.NewClient(extraOpts...)
assert.NilError(t, err, "[%s] could not create daemon client", d.id)
return c
}
// NewClient creates new client based on daemon's socket path
func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) {
clientOpts := []client.Opt{
client.FromEnv,
client.WithHost(d.Sock()),
}
clientOpts = append(clientOpts, extraOpts...)
return client.NewClientWithOpts(clientOpts...)
}
// Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files
func (d *Daemon) Cleanup(t testing.TB) {
t.Helper()
cleanupMount(t, d)
cleanupRaftDir(t, d)
cleanupNetworkNamespace(t, d)
}
// Start starts the daemon and return once it is ready to receive requests.
func (d *Daemon) Start(t testing.TB, args ...string) {
t.Helper()
if err := d.StartWithError(args...); err != nil {
d.DumpStackAndQuit() // in case the daemon is stuck
t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err)
}
}
// StartWithError starts the daemon and return once it is ready to receive requests.
// It returns an error in case it couldn't start.
func (d *Daemon) StartWithError(args ...string) error {
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return errors.Wrapf(err, "[%s] failed to create logfile", d.id)
}
return d.StartWithLogFile(logFile, args...)
}
// StartWithLogFile will start the daemon and attach its streams to a given file.
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
d.handleUserns()
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
if err != nil {
return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
}
if d.pidFile == "" {
d.pidFile = filepath.Join(d.Folder, "docker.pid")
}
d.args = []string{}
if d.rootlessUser != nil {
if d.dockerdBinary != defaultDockerdBinary {
return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary)
}
dockerdBinary = "sudo"
d.args = append(d.args,
"-u", d.rootlessUser.Username,
"-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir,
"-E", "HOME="+d.rootlessUser.HomeDir,
"-E", "PATH="+os.Getenv("PATH"),
"--",
defaultDockerdRootlessBinary,
)
}
d.args = append(d.args,
"--data-root", d.Root,
"--exec-root", d.execRoot,
"--pidfile", d.pidFile,
fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
"--containerd-namespace", d.id,
"--containerd-plugins-namespace", d.id+"p",
)
if d.containerdSocket != "" {
d.args = append(d.args, "--containerd", d.containerdSocket)
}
if d.defaultCgroupNamespaceMode != "" {
d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode)
}
if d.experimental {
d.args = append(d.args, "--experimental")
}
if d.init {
d.args = append(d.args, "--init")
}
if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
d.args = append(d.args, "--host", d.Sock())
}
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.args = append(d.args, "--userns-remap", root)
}
// If we don't explicitly set the log-level or debug flag(-D) then
// turn on debug mode
foundLog := false
foundSd := false
for _, a := range providedArgs {
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
foundLog = true
}
if strings.Contains(a, "--storage-driver") {
foundSd = true
}
}
if !foundLog {
d.args = append(d.args, "--debug")
}
if d.storageDriver != "" && !foundSd {
d.args = append(d.args, "--storage-driver", d.storageDriver)
}
d.args = append(d.args, providedArgs...)
d.cmd = exec.Command(dockerdBinary, d.args...)
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
d.cmd.Stdout = out
d.cmd.Stderr = out
d.logFile = out
if d.rootlessUser != nil {
// sudo requires this for propagating signals
setsid(d.cmd)
}
if err := d.cmd.Start(); err != nil {
return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
}
wait := make(chan error, 1)
go func() {
ret := d.cmd.Wait()
d.log.Logf("[%s] exiting daemon", d.id)
// If we send before logging, we might accidentally log _after_ the test is done.
// As of Go 1.12, this incurs a panic instead of silently being dropped.
wait <- ret
close(wait)
}()
d.Wait = wait
clientConfig, err := d.getClientConfig()
if err != nil {
return err
}
client := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest(http.MethodGet, "/_ping", nil)
if err != nil {
return errors.Wrapf(err, "[%s] could not create new request", d.id)
}
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
// make sure daemon is ready to receive requests
for i := 0; ; i++ {
d.log.Logf("[%s] waiting for daemon to start", d.id)
select {
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id)
case err := <-d.Wait:
return errors.Wrapf(err, "[%s] daemon exited during startup", d.id)
default:
rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second)
defer rcancel()
resp, err := client.Do(req.WithContext(rctx))
if err != nil {
if i > 2 { // don't log the first couple, this ends up just being noise
d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err)
}
select {
case <-ctx.Done():
case <-time.After(500 * time.Millisecond):
}
continue
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
}
d.log.Logf("[%s] daemon started\n", d.id)
d.Root, err = d.queryRootDir()
if err != nil {
return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id)
}
return nil
}
}
}
// StartWithBusybox will first start the daemon with Daemon.Start()
// then save the busybox image from the main daemon and load it into this Daemon instance.
func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) {
t.Helper()
d.Start(t, arg...)
d.LoadBusybox(t)
}
// Kill will send a SIGKILL to the daemon
func (d *Daemon) Kill() error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
d.logFile.Close()
d.cmd = nil
}()
if err := d.cmd.Process.Kill(); err != nil {
return err
}
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
return nil
}
// Pid returns the pid of the daemon
func (d *Daemon) Pid() int {
return d.cmd.Process.Pid
}
// Interrupt stops the daemon by sending it an Interrupt signal
func (d *Daemon) Interrupt() error {
return d.Signal(os.Interrupt)
}
// Signal sends the specified signal to the daemon if running
func (d *Daemon) Signal(signal os.Signal) error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
return d.cmd.Process.Signal(signal)
}
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
// stack to its log file and exit
// This is used primarily for gathering debug information on test timeout
func (d *Daemon) DumpStackAndQuit() {
if d.cmd == nil || d.cmd.Process == nil {
return
}
SignalDaemonDump(d.cmd.Process.Pid)
}
// Stop will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Stop(t testing.TB) {
t.Helper()
err := d.StopWithError()
if err != nil {
if err != errDaemonNotStarted {
t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err)
} else {
t.Logf("[%s] daemon is not started", d.id)
}
}
}
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
func (d *Daemon) StopWithError() (err error) {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
if err != nil {
d.log.Logf("[%s] error while stopping daemon: %v", d.id, err)
} else {
d.log.Logf("[%s] daemon stopped", d.id)
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
}
if err := d.logFile.Close(); err != nil {
d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err)
}
d.cmd = nil
}()
i := 1
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
tick := ticker.C
d.log.Logf("[%s] stopping daemon", d.id)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
if strings.Contains(err.Error(), "os: process already finished") {
return errDaemonNotStarted
}
return errors.Wrapf(err, "[%s] could not send signal", d.id)
}
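	// Shutdown is attempted in two phases: wait up to 20 seconds for the daemon to exit after
	// the first SIGINT, then re-send SIGINT once per second (at most five more attempts) before
	// the SIGKILL fallback at the end of this function.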
out1:
for {
select {
case err := <-d.Wait:
return err
case <-time.After(20 * time.Second):
// time for stopping jobs and run onShutdown hooks
d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
break out1
}
}
out2:
for {
select {
case err := <-d.Wait:
return err
case <-tick:
i++
if i > 5 {
d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i)
break out2
}
d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i)
}
}
}
if err := d.cmd.Process.Kill(); err != nil {
d.log.Logf("[%s] failed to kill daemon: %v", d.id, err)
return err
}
return nil
}
// Restart will restart the daemon by first stopping it and then starting it.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Restart(t testing.TB, args ...string) {
t.Helper()
d.Stop(t)
d.Start(t, args...)
}
// RestartWithError will restart the daemon by first stopping it and then starting it.
func (d *Daemon) RestartWithError(arg ...string) error {
if err := d.StopWithError(); err != nil {
return err
}
return d.StartWithError(arg...)
}
func (d *Daemon) handleUserns() {
// in the case of tests running a user namespace-enabled daemon, we have resolved
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
// remapped root is added--we need to subtract it from the path before calling
// start or else we will continue making subdirectories rather than truly restarting
// with the same location/root:
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.Root = filepath.Dir(d.Root)
}
}
// ReloadConfig asks the daemon to reload its configuration
func (d *Daemon) ReloadConfig() error {
if d.cmd == nil || d.cmd.Process == nil {
return errors.New("daemon is not running")
}
errCh := make(chan error, 1)
started := make(chan struct{})
go func() {
_, body, err := request.Get("/events", request.Host(d.Sock()))
close(started)
if err != nil {
errCh <- err
return
}
defer body.Close()
dec := json.NewDecoder(body)
for {
var e events.Message
if err := dec.Decode(&e); err != nil {
errCh <- err
return
}
if e.Type != events.DaemonEventType {
continue
}
if e.Action != "reload" {
continue
}
close(errCh) // notify that we are done
return
}
}()
<-started
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id)
}
select {
case err := <-errCh:
if err != nil {
return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id)
}
case <-time.After(30 * time.Second):
return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id)
}
return nil
}
// LoadBusybox image into the daemon
func (d *Daemon) LoadBusybox(t testing.TB) {
t.Helper()
clientHost, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(t, err, "[%s] failed to create client", d.id)
defer clientHost.Close()
ctx := context.Background()
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
assert.NilError(t, err, "[%s] failed to download busybox", d.id)
defer reader.Close()
c := d.NewClientT(t)
defer c.Close()
resp, err := c.ImageLoad(ctx, reader, true)
assert.NilError(t, err, "[%s] failed to load busybox", d.id)
defer resp.Body.Close()
}
func (d *Daemon) getClientConfig() (*clientConfig, error) {
var (
transport *http.Transport
scheme string
addr string
proto string
)
if d.UseDefaultTLSHost {
option := &tlsconfig.Options{
CAFile: "fixtures/https/ca.pem",
CertFile: "fixtures/https/client-cert.pem",
KeyFile: "fixtures/https/client-key.pem",
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
scheme = "https"
proto = "tcp"
} else if d.UseDefaultHost {
addr = opts.DefaultUnixSocket
proto = "unix"
scheme = "http"
transport = &http.Transport{}
} else {
addr = d.sockPath()
proto = "unix"
scheme = "http"
transport = &http.Transport{}
}
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
return nil, err
}
transport.DisableKeepAlives = true
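	// For unix sockets the transport above already dials the socket path, so the address is only
	// used as the HTTP Host header; shorten it to the socket file name.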
if proto == "unix" {
addr = filepath.Base(addr)
}
return &clientConfig{
transport: transport,
scheme: scheme,
addr: addr,
}, nil
}
func (d *Daemon) queryRootDir() (string, error) {
// update daemon root by asking /info endpoint (to support user
// namespaced daemon with root remapped uid.gid directory)
clientConfig, err := d.getClientConfig()
if err != nil {
return "", err
}
c := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest(http.MethodGet, "/info", nil)
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
resp, err := c.Do(req)
if err != nil {
return "", err
}
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
return resp.Body.Close()
})
type Info struct {
DockerRootDir string
}
var b []byte
var i Info
b, err = request.ReadBody(body)
if err == nil && resp.StatusCode == http.StatusOK {
// read the docker root dir
if err = json.Unmarshal(b, &i); err == nil {
return i.DockerRootDir, nil
}
}
return "", err
}
// Info returns the info struct for this daemon
func (d *Daemon) Info(t testing.TB) types.Info {
t.Helper()
c := d.NewClientT(t)
info, err := c.Info(context.Background())
assert.NilError(t, err)
assert.NilError(t, c.Close())
return info
}
// cleanupRaftDir removes swarmkit wal files if present
func cleanupRaftDir(t testing.TB, d *Daemon) {
t.Helper()
for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} {
dir := filepath.Join(d.Root, "swarm/raft", p)
if err := os.RemoveAll(dir); err != nil {
t.Logf("[%s] error removing %v: %v", d.id, dir, err)
}
}
}
| package daemon // import "github.com/docker/docker/testutil/daemon"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/docker/testutil/request"
"github.com/docker/go-connections/sockets"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"gotest.tools/v3/assert"
)
// LogT is the subset of the testing.TB interface used by the daemon.
type LogT interface {
Logf(string, ...interface{})
}
// nopLog is a no-op implementation of LogT that is used in daemons created by
// NewDaemon (where no testing.TB is available).
type nopLog struct{}
func (nopLog) Logf(string, ...interface{}) {}
const (
defaultDockerdBinary = "dockerd"
defaultContainerdSocket = "/var/run/docker/containerd/containerd.sock"
defaultDockerdRootlessBinary = "dockerd-rootless.sh"
)
var errDaemonNotStarted = errors.New("daemon not started")
// SockRoot holds the path of the default docker integration daemon socket
var SockRoot = filepath.Join(os.TempDir(), "docker-integration")
type clientConfig struct {
transport *http.Transport
scheme string
addr string
}
// Daemon represents a Docker daemon for the testing framework
type Daemon struct {
Root string
Folder string
Wait chan error
UseDefaultHost bool
UseDefaultTLSHost bool
id string
logFile *os.File
cmd *exec.Cmd
storageDriver string
userlandProxy bool
defaultCgroupNamespaceMode string
execRoot string
experimental bool
init bool
dockerdBinary string
log LogT
pidFile string
args []string
containerdSocket string
rootlessUser *user.User
rootlessXDGRuntimeDir string
// swarm related field
swarmListenAddr string
SwarmPort int // FIXME(vdemeester) should probably not be exported
DefaultAddrPool []string
SubnetSize uint32
DataPathPort uint32
OOMScoreAdjust int
// cached information
CachedInfo types.Info
}
// NewDaemon returns a Daemon instance to be used for testing.
// The daemon will not automatically start.
// The daemon will modify and create files under workingDir.
func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
if err := os.MkdirAll(SockRoot, 0700); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon socket root %q", SockRoot)
}
id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
dir := filepath.Join(workingDir, id)
daemonFolder, err := filepath.Abs(dir)
if err != nil {
return nil, err
}
daemonRoot := filepath.Join(daemonFolder, "root")
if err := os.MkdirAll(daemonRoot, 0755); err != nil {
return nil, errors.Wrapf(err, "failed to create daemon root %q", daemonRoot)
}
userlandProxy := true
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
		if val, err := strconv.ParseBool(env); err == nil {
userlandProxy = val
}
}
d := &Daemon{
id: id,
Folder: daemonFolder,
Root: daemonRoot,
storageDriver: storageDriver,
userlandProxy: userlandProxy,
// dxr stands for docker-execroot (shortened for avoiding unix(7) path length limitation)
execRoot: filepath.Join(os.TempDir(), "dxr", id),
dockerdBinary: defaultDockerdBinary,
swarmListenAddr: defaultSwarmListenAddr,
SwarmPort: DefaultSwarmPort,
log: nopLog{},
containerdSocket: defaultContainerdSocket,
}
for _, op := range ops {
op(d)
}
if d.rootlessUser != nil {
if err := os.Chmod(SockRoot, 0777); err != nil {
return nil, err
}
uid, err := strconv.Atoi(d.rootlessUser.Uid)
if err != nil {
return nil, err
}
gid, err := strconv.Atoi(d.rootlessUser.Gid)
if err != nil {
return nil, err
}
if err := os.Chown(d.Folder, uid, gid); err != nil {
return nil, err
}
if err := os.Chown(d.Root, uid, gid); err != nil {
return nil, err
}
if err := os.MkdirAll(filepath.Dir(d.execRoot), 0700); err != nil {
return nil, err
}
if err := os.Chown(filepath.Dir(d.execRoot), uid, gid); err != nil {
return nil, err
}
if err := os.MkdirAll(d.execRoot, 0700); err != nil {
return nil, err
}
if err := os.Chown(d.execRoot, uid, gid); err != nil {
return nil, err
}
d.rootlessXDGRuntimeDir = filepath.Join(d.Folder, "xdgrun")
if err := os.MkdirAll(d.rootlessXDGRuntimeDir, 0700); err != nil {
return nil, err
}
if err := os.Chown(d.rootlessXDGRuntimeDir, uid, gid); err != nil {
return nil, err
}
d.containerdSocket = ""
}
return d, nil
}
// New returns a Daemon instance to be used for testing.
// This will create a directory such as d123456789 in the folder specified by
// $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
// The daemon will not automatically start.
func New(t testing.TB, ops ...Option) *Daemon {
t.Helper()
dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
if dest == "" {
dest = os.Getenv("DEST")
}
dest = filepath.Join(dest, t.Name())
assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
if os.Getenv("DOCKER_ROOTLESS") != "" {
if os.Getenv("DOCKER_REMAP_ROOT") != "" {
t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_REMAP_ROOT currently")
}
if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
if val, err := strconv.ParseBool(env); err == nil && !val {
t.Skip("DOCKER_ROOTLESS doesn't support DOCKER_USERLANDPROXY=false")
}
}
ops = append(ops, WithRootlessUser("unprivilegeduser"))
}
ops = append(ops, WithOOMScoreAdjust(-500))
d, err := NewDaemon(dest, ops...)
assert.NilError(t, err, "could not create daemon at %q", dest)
if d.rootlessUser != nil && d.dockerdBinary != defaultDockerdBinary {
t.Skipf("DOCKER_ROOTLESS doesn't support specifying non-default dockerd binary path %q", d.dockerdBinary)
}
return d
}
// BinaryPath returns the binary and its arguments.
func (d *Daemon) BinaryPath() (string, error) {
dockerdBinary, err := exec.LookPath(d.dockerdBinary)
if err != nil {
return "", errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
}
return dockerdBinary, nil
}
// ContainersNamespace returns the containerd namespace used for containers.
func (d *Daemon) ContainersNamespace() string {
return d.id
}
// RootDir returns the root directory of the daemon.
func (d *Daemon) RootDir() string {
return d.Root
}
// ID returns the generated id of the daemon
func (d *Daemon) ID() string {
return d.id
}
// StorageDriver returns the configured storage driver of the daemon
func (d *Daemon) StorageDriver() string {
return d.storageDriver
}
// Sock returns the socket path of the daemon
func (d *Daemon) Sock() string {
return fmt.Sprintf("unix://" + d.sockPath())
}
func (d *Daemon) sockPath() string {
return filepath.Join(SockRoot, d.id+".sock")
}
// LogFileName returns the path of the daemon's log file
func (d *Daemon) LogFileName() string {
return d.logFile.Name()
}
// ReadLogFile returns the content of the daemon log file
func (d *Daemon) ReadLogFile() ([]byte, error) {
return ioutil.ReadFile(d.logFile.Name())
}
// NewClientT creates new client based on daemon's socket path
func (d *Daemon) NewClientT(t testing.TB, extraOpts ...client.Opt) *client.Client {
t.Helper()
c, err := d.NewClient(extraOpts...)
assert.NilError(t, err, "[%s] could not create daemon client", d.id)
return c
}
// NewClient creates new client based on daemon's socket path
func (d *Daemon) NewClient(extraOpts ...client.Opt) (*client.Client, error) {
clientOpts := []client.Opt{
client.FromEnv,
client.WithHost(d.Sock()),
}
clientOpts = append(clientOpts, extraOpts...)
return client.NewClientWithOpts(clientOpts...)
}
// Cleanup cleans the daemon files : exec root (network namespaces, ...), swarmkit files
func (d *Daemon) Cleanup(t testing.TB) {
t.Helper()
cleanupMount(t, d)
cleanupRaftDir(t, d)
cleanupNetworkNamespace(t, d)
}
// Start starts the daemon and return once it is ready to receive requests.
func (d *Daemon) Start(t testing.TB, args ...string) {
t.Helper()
if err := d.StartWithError(args...); err != nil {
d.DumpStackAndQuit() // in case the daemon is stuck
t.Fatalf("[%s] failed to start daemon with arguments %v : %v", d.id, d.args, err)
}
}
// StartWithError starts the daemon and return once it is ready to receive requests.
// It returns an error in case it couldn't start.
func (d *Daemon) StartWithError(args ...string) error {
logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return errors.Wrapf(err, "[%s] failed to create logfile", d.id)
}
return d.StartWithLogFile(logFile, args...)
}
// StartWithLogFile will start the daemon and attach its streams to a given file.
func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
d.handleUserns()
dockerdBinary, err := d.BinaryPath()
if err != nil {
return err
}
if d.pidFile == "" {
d.pidFile = filepath.Join(d.Folder, "docker.pid")
}
d.args = []string{}
if d.rootlessUser != nil {
if d.dockerdBinary != defaultDockerdBinary {
return errors.Errorf("[%s] DOCKER_ROOTLESS doesn't support non-default dockerd binary path %q", d.id, d.dockerdBinary)
}
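		// In rootless mode the daemon is started through sudo as the unprivileged user, forwarding
		// the environment (XDG_RUNTIME_DIR, HOME, PATH) that dockerd-rootless.sh and RootlessKit need.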
dockerdBinary = "sudo"
d.args = append(d.args,
"-u", d.rootlessUser.Username,
"-E", "XDG_RUNTIME_DIR="+d.rootlessXDGRuntimeDir,
"-E", "HOME="+d.rootlessUser.HomeDir,
"-E", "PATH="+os.Getenv("PATH"),
"--",
defaultDockerdRootlessBinary,
)
}
d.args = append(d.args,
"--data-root", d.Root,
"--exec-root", d.execRoot,
"--pidfile", d.pidFile,
fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
"--containerd-namespace", d.id,
"--containerd-plugins-namespace", d.id+"p",
)
if d.containerdSocket != "" {
d.args = append(d.args, "--containerd", d.containerdSocket)
}
if d.defaultCgroupNamespaceMode != "" {
d.args = append(d.args, "--default-cgroupns-mode", d.defaultCgroupNamespaceMode)
}
if d.experimental {
d.args = append(d.args, "--experimental")
}
if d.init {
d.args = append(d.args, "--init")
}
if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
d.args = append(d.args, "--host", d.Sock())
}
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.args = append(d.args, "--userns-remap", root)
}
// If we don't explicitly set the log-level or debug flag(-D) then
// turn on debug mode
foundLog := false
foundSd := false
for _, a := range providedArgs {
if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") {
foundLog = true
}
if strings.Contains(a, "--storage-driver") {
foundSd = true
}
}
if !foundLog {
d.args = append(d.args, "--debug")
}
if d.storageDriver != "" && !foundSd {
d.args = append(d.args, "--storage-driver", d.storageDriver)
}
d.args = append(d.args, providedArgs...)
d.cmd = exec.Command(dockerdBinary, d.args...)
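	// DOCKER_SERVICE_PREFER_OFFLINE_IMAGE makes Swarm service tasks use an image that is
	// already present locally instead of forcing a registry pull, keeping tests network-independent.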
d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1")
d.cmd.Stdout = out
d.cmd.Stderr = out
d.logFile = out
if d.rootlessUser != nil {
// sudo requires this for propagating signals
setsid(d.cmd)
}
if err := d.cmd.Start(); err != nil {
return errors.Wrapf(err, "[%s] could not start daemon container", d.id)
}
wait := make(chan error, 1)
go func() {
ret := d.cmd.Wait()
d.log.Logf("[%s] exiting daemon", d.id)
// If we send before logging, we might accidentally log _after_ the test is done.
// As of Go 1.12, this incurs a panic instead of silently being dropped.
wait <- ret
close(wait)
}()
d.Wait = wait
clientConfig, err := d.getClientConfig()
if err != nil {
return err
}
client := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest(http.MethodGet, "/_ping", nil)
if err != nil {
return errors.Wrapf(err, "[%s] could not create new request", d.id)
}
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
// make sure daemon is ready to receive requests
for i := 0; ; i++ {
d.log.Logf("[%s] waiting for daemon to start", d.id)
select {
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "[%s] daemon exited and never started", d.id)
case err := <-d.Wait:
return errors.Wrapf(err, "[%s] daemon exited during startup", d.id)
default:
rctx, rcancel := context.WithTimeout(context.TODO(), 2*time.Second)
defer rcancel()
resp, err := client.Do(req.WithContext(rctx))
if err != nil {
if i > 2 { // don't log the first couple, this ends up just being noise
d.log.Logf("[%s] error pinging daemon on start: %v", d.id, err)
}
select {
case <-ctx.Done():
case <-time.After(500 * time.Millisecond):
}
continue
}
resp.Body.Close()
if resp.StatusCode != http.StatusOK {
d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status)
}
d.log.Logf("[%s] daemon started\n", d.id)
d.Root, err = d.queryRootDir()
if err != nil {
return errors.Wrapf(err, "[%s] error querying daemon for root directory", d.id)
}
return nil
}
}
}
// StartWithBusybox will first start the daemon with Daemon.Start()
// then save the busybox image from the main daemon and load it into this Daemon instance.
func (d *Daemon) StartWithBusybox(t testing.TB, arg ...string) {
t.Helper()
d.Start(t, arg...)
d.LoadBusybox(t)
}
// Kill will send a SIGKILL to the daemon
func (d *Daemon) Kill() error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
d.logFile.Close()
d.cmd = nil
}()
if err := d.cmd.Process.Kill(); err != nil {
return err
}
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
return nil
}
// Pid returns the pid of the daemon
func (d *Daemon) Pid() int {
return d.cmd.Process.Pid
}
// Interrupt stops the daemon by sending it an Interrupt signal
func (d *Daemon) Interrupt() error {
return d.Signal(os.Interrupt)
}
// Signal sends the specified signal to the daemon if running
func (d *Daemon) Signal(signal os.Signal) error {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
return d.cmd.Process.Signal(signal)
}
// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
// stack to its log file and exit
// This is used primarily for gathering debug information on test timeout
func (d *Daemon) DumpStackAndQuit() {
if d.cmd == nil || d.cmd.Process == nil {
return
}
SignalDaemonDump(d.cmd.Process.Pid)
}
// Stop will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Stop(t testing.TB) {
t.Helper()
err := d.StopWithError()
if err != nil {
if err != errDaemonNotStarted {
t.Fatalf("[%s] error while stopping the daemon: %v", d.id, err)
} else {
t.Logf("[%s] daemon is not started", d.id)
}
}
}
// StopWithError will send a SIGINT every second and wait for the daemon to stop.
// If it times out, a SIGKILL is sent.
// Stop will not delete the daemon directory. If a purged daemon is needed,
// instantiate a new one with NewDaemon.
func (d *Daemon) StopWithError() (err error) {
if d.cmd == nil || d.Wait == nil {
return errDaemonNotStarted
}
defer func() {
if err != nil {
d.log.Logf("[%s] error while stopping daemon: %v", d.id, err)
} else {
d.log.Logf("[%s] daemon stopped", d.id)
if d.pidFile != "" {
_ = os.Remove(d.pidFile)
}
}
if err := d.logFile.Close(); err != nil {
d.log.Logf("[%s] failed to close daemon logfile: %v", d.id, err)
}
d.cmd = nil
}()
i := 1
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
tick := ticker.C
d.log.Logf("[%s] stopping daemon", d.id)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
if strings.Contains(err.Error(), "os: process already finished") {
return errDaemonNotStarted
}
return errors.Wrapf(err, "[%s] could not send signal", d.id)
}
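	// Shutdown is attempted in two phases: wait up to 20 seconds for the daemon to exit after
	// the first SIGINT, then re-send SIGINT once per second (at most five more attempts) before
	// the SIGKILL fallback at the end of this function.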
out1:
for {
select {
case err := <-d.Wait:
return err
case <-time.After(20 * time.Second):
// time for stopping jobs and run onShutdown hooks
d.log.Logf("[%s] daemon stop timed out after 20 seconds", d.id)
break out1
}
}
out2:
for {
select {
case err := <-d.Wait:
return err
case <-tick:
i++
if i > 5 {
d.log.Logf("[%s] tried to interrupt daemon for %d times, now try to kill it", d.id, i)
break out2
}
d.log.Logf("[%d] attempt #%d/5: daemon is still running with pid %d", i, d.cmd.Process.Pid)
if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
return errors.Wrapf(err, "[%s] attempt #%d/5 could not send signal", d.id, i)
}
}
}
if err := d.cmd.Process.Kill(); err != nil {
d.log.Logf("[%s] failed to kill daemon: %v", d.id, err)
return err
}
return nil
}
// Restart will restart the daemon by first stopping it and then starting it.
// If an error occurs while starting the daemon, the test will fail.
func (d *Daemon) Restart(t testing.TB, args ...string) {
t.Helper()
d.Stop(t)
d.Start(t, args...)
}
// RestartWithError will restart the daemon by first stopping it and then starting it.
func (d *Daemon) RestartWithError(arg ...string) error {
if err := d.StopWithError(); err != nil {
return err
}
return d.StartWithError(arg...)
}
func (d *Daemon) handleUserns() {
// in the case of tests running a user namespace-enabled daemon, we have resolved
// d.Root to be the actual final path of the graph dir after the "uid.gid" of
// remapped root is added--we need to subtract it from the path before calling
// start or else we will continue making subdirectories rather than truly restarting
// with the same location/root:
if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
d.Root = filepath.Dir(d.Root)
}
}
// ReloadConfig asks the daemon to reload its configuration
func (d *Daemon) ReloadConfig() error {
if d.cmd == nil || d.cmd.Process == nil {
return errors.New("daemon is not running")
}
errCh := make(chan error, 1)
started := make(chan struct{})
go func() {
_, body, err := request.Get("/events", request.Host(d.Sock()))
close(started)
if err != nil {
errCh <- err
return
}
defer body.Close()
dec := json.NewDecoder(body)
for {
var e events.Message
if err := dec.Decode(&e); err != nil {
errCh <- err
return
}
if e.Type != events.DaemonEventType {
continue
}
if e.Action != "reload" {
continue
}
close(errCh) // notify that we are done
return
}
}()
<-started
if err := signalDaemonReload(d.cmd.Process.Pid); err != nil {
return errors.Wrapf(err, "[%s] error signaling daemon reload", d.id)
}
select {
case err := <-errCh:
if err != nil {
return errors.Wrapf(err, "[%s] error waiting for daemon reload event", d.id)
}
case <-time.After(30 * time.Second):
return errors.Errorf("[%s] daemon reload event timed out after 30 seconds", d.id)
}
return nil
}
// LoadBusybox image into the daemon
func (d *Daemon) LoadBusybox(t testing.TB) {
t.Helper()
clientHost, err := client.NewClientWithOpts(client.FromEnv)
assert.NilError(t, err, "[%s] failed to create client", d.id)
defer clientHost.Close()
ctx := context.Background()
reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"})
assert.NilError(t, err, "[%s] failed to download busybox", d.id)
defer reader.Close()
c := d.NewClientT(t)
defer c.Close()
resp, err := c.ImageLoad(ctx, reader, true)
assert.NilError(t, err, "[%s] failed to load busybox", d.id)
defer resp.Body.Close()
}
func (d *Daemon) getClientConfig() (*clientConfig, error) {
var (
transport *http.Transport
scheme string
addr string
proto string
)
if d.UseDefaultTLSHost {
option := &tlsconfig.Options{
CAFile: "fixtures/https/ca.pem",
CertFile: "fixtures/https/client-cert.pem",
KeyFile: "fixtures/https/client-key.pem",
}
tlsConfig, err := tlsconfig.Client(*option)
if err != nil {
return nil, err
}
transport = &http.Transport{
TLSClientConfig: tlsConfig,
}
addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
scheme = "https"
proto = "tcp"
} else if d.UseDefaultHost {
addr = opts.DefaultUnixSocket
proto = "unix"
scheme = "http"
transport = &http.Transport{}
} else {
addr = d.sockPath()
proto = "unix"
scheme = "http"
transport = &http.Transport{}
}
if err := sockets.ConfigureTransport(transport, proto, addr); err != nil {
return nil, err
}
transport.DisableKeepAlives = true
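	// For unix sockets the transport above already dials the socket path, so the address is only
	// used as the HTTP Host header; shorten it to the socket file name.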
if proto == "unix" {
addr = filepath.Base(addr)
}
return &clientConfig{
transport: transport,
scheme: scheme,
addr: addr,
}, nil
}
func (d *Daemon) queryRootDir() (string, error) {
// update daemon root by asking /info endpoint (to support user
// namespaced daemon with root remapped uid.gid directory)
clientConfig, err := d.getClientConfig()
if err != nil {
return "", err
}
c := &http.Client{
Transport: clientConfig.transport,
}
req, err := http.NewRequest(http.MethodGet, "/info", nil)
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
req.URL.Host = clientConfig.addr
req.URL.Scheme = clientConfig.scheme
resp, err := c.Do(req)
if err != nil {
return "", err
}
body := ioutils.NewReadCloserWrapper(resp.Body, func() error {
return resp.Body.Close()
})
type Info struct {
DockerRootDir string
}
var b []byte
var i Info
b, err = request.ReadBody(body)
if err == nil && resp.StatusCode == http.StatusOK {
// read the docker root dir
if err = json.Unmarshal(b, &i); err == nil {
return i.DockerRootDir, nil
}
}
return "", err
}
// Info returns the info struct for this daemon
func (d *Daemon) Info(t testing.TB) types.Info {
t.Helper()
c := d.NewClientT(t)
info, err := c.Info(context.Background())
assert.NilError(t, err)
assert.NilError(t, c.Close())
return info
}
// cleanupRaftDir removes swarmkit wal files if present
func cleanupRaftDir(t testing.TB, d *Daemon) {
t.Helper()
for _, p := range []string{"wal", "wal-v3-encrypted", "snap-v3-encrypted"} {
dir := filepath.Join(d.Root, "swarm/raft", p)
if err := os.RemoveAll(dir); err != nil {
t.Logf("[%s] error removing %v: %v", d.id, dir, err)
}
}
}
| aiordache | ee8f581167d1d36fe05c39d8975d8b9960a49521 | 314759dc2f4745925d8dec6d15acc7761c6e5c92 | Perhaps we should keep it simple and only do this part here, and keep the special handling for rootless in `StartWithLogFile()` | thaJeztah | 4,786 |
moby/moby | 42,373 | Upstream systemd unit files from docker-ce-packaging repo | The docker-ce-packaging repo currently maintains its own copy of the systemd unit files for docker; https://github.com/docker/docker-ce-packaging/tree/6857538d997e58d7b941c65522c9c0526c5fc9da/systemd
This PR moves the changes from those files back to the ones in this repository. There were some conflicts, so needs a review.
I also removed the (now unused) `.rpm` version of the systemd unit.
I tried to preserve history from the docker-ce-packaging repository, taking this approach;
```bash
# install filter-repo (https://github.com/newren/git-filter-repo/blob/main/INSTALL.md)
brew install git-filter-repo
# create a temporary clone of the packaging repo
git clone https://github.com/docker/docker-ce-packaging.git ~/Projects/packaging_tmp
cd ~/Projects/packaging_tmp
# remove all code, except for the files we need, and rename the files
git filter-repo \
--path-match systemd/ \
--path-rename systemd/docker.service:contrib/init/systemd/docker.service \
--path-rename systemd/docker.socket:contrib/init/systemd/docker.socket
# integrate the files in the docker repository
cd ~/go/src/github.com/docker/docker/
git checkout -b upstream_systemd_units
git remote add packaging_tmp ~/Projects/packaging_tmp
git fetch packaging_tmp
git merge --allow-unrelated-histories --signoff -S packaging_tmp/master
```
After this is merged, I'll update the containerd-packaging repository to remove the systemd unit files from that repository and to consume the unit files from this repository instead.
Differences between the results in this branch, and the units currently in docker-ce-packaging;
in `docker.service`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service contrib/init/systemd/docker.service
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service b/contrib/init/systemd/docker.service
index 942008045d..c40735a2b5 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -1,7 +1,7 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target firewalld.service containerd.service
+After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
@@ -12,7 +12,7 @@ Type=notify
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutSec=0
+TimeoutStartSec=0
RestartSec=2
Restart=always
```
in `docker.socket`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket contrib/init/systemd/docker.socket
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket b/contrib/init/systemd/docker.socket
index 9db5049150..c76a23296e 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket
+++ b/contrib/init/systemd/docker.socket
@@ -2,7 +2,9 @@
Description=Docker Socket for the API
[Socket]
-ListenStream=/var/run/docker.sock
+# If /var/run is not implemented as a symlink to /run, you may need to
+# specify ListenStream=/var/run/docker.sock instead.
+ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
```
| null | 2021-05-12 09:46:19+00:00 | 2021-05-19 13:06:42+00:00 | contrib/init/systemd/docker.service | [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd://
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this option.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
| [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
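# dockerd is pointed at the system containerd socket (see --containerd in ExecStart), so this
# unit is ordered after, and requires, containerd.service.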
After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
| thaJeztah | c706213427da1b57f8158a5224037ee9052887dd | 84bf80ac68725087031cb138071d99873f576e3e | Actually; I should probably pick the one from this repository; looks like `TimeoutSec` is deprecated in favour of `TimeoutStartSec`; https://github.com/systemd/systemd/issues/11155 | thaJeztah | 4,787 |
moby/moby | 42,373 | Upstream systemd unit files from docker-ce-packaging repo | The docker-ce-packaging repo currently maintains its own copy of the systemd unit files for docker; https://github.com/docker/docker-ce-packaging/tree/6857538d997e58d7b941c65522c9c0526c5fc9da/systemd
This PR moves the changes from those files back to the ones in this repository. There were some conflicts, so needs a review.
I also removed the (now unused) `.rpm` version of the systemd unit.
I tried to preserve history from the docker-ce-packaging repository, taking this approach;
```bash
# install filter-repo (https://github.com/newren/git-filter-repo/blob/main/INSTALL.md)
brew install git-filter-repo
# create a temporary clone of the packaging repo
git clone https://github.com/docker/docker-ce-packaging.git ~/Projects/packaging_tmp
cd ~/Projects/packaging_tmp
# remove all code, except for the files we need, and rename the files
git filter-repo \
--path-match systemd/ \
--path-rename systemd/docker.service:contrib/init/systemd/docker.service \
--path-rename systemd/docker.socket:contrib/init/systemd/docker.socket
# integrate the files in the docker repository
cd ~/go/src/github.com/docker/docker/
git checkout -b upstream_systemd_units
git remote add packaging_tmp ~/Projects/packaging_tmp
git fetch packaging_tmp
git merge --allow-unrelated-histories --signoff -S packaging_tmp/master
```
After this is merged, I'll update the containerd-packaging repository to remove the systemd unit files from that repository and to consume the unit files from this repository instead.
Differences between the results in this branch, and the units currently in docker-ce-packaging;
in `docker.service`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service contrib/init/systemd/docker.service
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service b/contrib/init/systemd/docker.service
index 942008045d..c40735a2b5 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -1,7 +1,7 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target firewalld.service containerd.service
+After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
@@ -12,7 +12,7 @@ Type=notify
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutSec=0
+TimeoutStartSec=0
RestartSec=2
Restart=always
```
in `docker.socket`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket contrib/init/systemd/docker.socket
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket b/contrib/init/systemd/docker.socket
index 9db5049150..c76a23296e 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket
+++ b/contrib/init/systemd/docker.socket
@@ -2,7 +2,9 @@
Description=Docker Socket for the API
[Socket]
-ListenStream=/var/run/docker.sock
+# If /var/run is not implemented as a symlink to /run, you may need to
+# specify ListenStream=/var/run/docker.sock instead.
+ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
```
| null | 2021-05-12 09:46:19+00:00 | 2021-05-19 13:06:42+00:00 | contrib/init/systemd/docker.service | [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd://
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this option.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
| [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
| thaJeztah | c706213427da1b57f8158a5224037ee9052887dd | 84bf80ac68725087031cb138071d99873f576e3e | There was a merge conflict here, because the unit file in docker-ce-packaging did not have `docker.socket` here; that was added in this repository through https://github.com/moby/moby/pull/7257, so I opted to keep that in the list and append the `containerd.service` from the docker-ce-packaging repository | thaJeztah | 4,788 |
moby/moby | 42,373 | Upstream systemd unit files from docker-ce-packaging repo | The docker-ce-packaging repo currently maintains its own copy of the systemd unit files for docker; https://github.com/docker/docker-ce-packaging/tree/6857538d997e58d7b941c65522c9c0526c5fc9da/systemd
This PR moves the changes from those files back to the ones in this repository. There were some conflicts, so needs a review.
I also removed the (now unused) `.rpm` version of the systemd unit.
I tried to preserve history from the docker-ce-packaging repository, taking this approach;
```bash
# install filter-repo (https://github.com/newren/git-filter-repo/blob/main/INSTALL.md)
brew install git-filter-repo
# create a temporary clone of the packaging repo
git clone https://github.com/docker/docker-ce-packaging.git ~/Projects/packaging_tmp
cd ~/Projects/packaging_tmp
# remove all code, except for the files we need, and rename the files
git filter-repo \
--path-match systemd/ \
--path-rename systemd/docker.service:contrib/init/systemd/docker.service \
--path-rename systemd/docker.socket:contrib/init/systemd/docker.socket
# integrate the files in the docker repository
cd ~/go/src/github.com/docker/docker/
git checkout -b upstream_systemd_units
git remote add packaging_tmp ~/Projects/packaging_tmp
git fetch packaging_tmp
git merge --allow-unrelated-histories --signoff -S packaging_tmp/master
```
After this is merged, I'll update the containerd-packaging repository to remove the systemd unit files from that repository and to consume the unit files from this repository instead.
Differences between the results in this branch, and the units currently in docker-ce-packaging;
in `docker.service`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service contrib/init/systemd/docker.service
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service b/contrib/init/systemd/docker.service
index 942008045d..c40735a2b5 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.service
+++ b/contrib/init/systemd/docker.service
@@ -1,7 +1,7 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
-After=network-online.target firewalld.service containerd.service
+After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
@@ -12,7 +12,7 @@ Type=notify
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
-TimeoutSec=0
+TimeoutStartSec=0
RestartSec=2
Restart=always
```
in `docker.socket`:
```patch
git diff --no-index ~/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket contrib/init/systemd/docker.socket
diff --git a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket b/contrib/init/systemd/docker.socket
index 9db5049150..c76a23296e 100644
--- a/Users/sebastiaan/go/src/github.com/docker/docker-ce-packaging/systemd/docker.socket
+++ b/contrib/init/systemd/docker.socket
@@ -2,7 +2,9 @@
Description=Docker Socket for the API
[Socket]
-ListenStream=/var/run/docker.sock
+# If /var/run is not implemented as a symlink to /run, you may need to
+# specify ListenStream=/var/run/docker.sock instead.
+ListenStream=/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
```
| null | 2021-05-12 09:46:19+00:00 | 2021-05-19 13:06:42+00:00 | contrib/init/systemd/docker.service | [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service
Wants=network-online.target
Requires=docker.socket
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd://
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this option.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
| [Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target docker.socket firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutStartSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
| thaJeztah | c706213427da1b57f8158a5224037ee9052887dd | 84bf80ac68725087031cb138071d99873f576e3e | Changed this one to `TimeoutStartSec` | thaJeztah | 4,789 |
moby/moby | 42,334 | rootless: disable overlay2 if running with SELinux | Contains https://github.com/moby/moby/pull/42199 (`dockerd-rootless.sh: avoid /run/xtables.lock EACCES on SELinux hosts`)
- - -
**- What I did**
Close #42333 (`rootless+overlay2 (kernel 5.11)+SELinux: mkdir /home/<USER>/.local/share/docker/overlay2/<CID>-init/merged/dev: permission denied. `)
**- How I did it**
Disable overlay2 when `$_DOCKERD_ROOTLESS_SELINUX` is set.
We can't rely on `go-selinux.GetEnabled()` because RootlessKit doesn't mount /sys/fs/selinux in the child: https://github.com/rootless-containers/rootlesskit/issues/94
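For context, the `dockerd-rootless.sh` side (from #42199) looks roughly like this; a sketch, not the exact diff:
```sh
# Detect SELinux in the parent namespace and let the child daemon know,
# since /sys/fs/selinux is not mounted inside the RootlessKit namespace.
if command -v selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
	_DOCKERD_ROOTLESS_SELINUX=1
	export _DOCKERD_ROOTLESS_SELINUX
fi
```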
**- How to verify it**
- Install to Fedora 34
- Make sure `docker info` shows `fuse-overlayfs` as the default storage driver on rootless, while retaining `overlay2` on rootful
- Make sure `docker run hello-world` works on rootless
**- Description for the changelog**
rootless: disable overlay2 if running with SELinux
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-04-28 07:02:08+00:00 | 2021-06-03 17:33:27+00:00 | daemon/graphdriver/overlayutils/overlayutils.go | // +build linux
package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils"
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/docker/docker/daemon/graphdriver"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
if backingFs == "extfs" {
msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support."
}
msg += " Backing filesystems without d_type support are not supported."
return graphdriver.NotSupportedError(msg)
}
// SupportsOverlay checks if the system supports overlay filesystem
// by performing an actual overlay mount.
//
// checkMultipleLowers parameter enables check for multiple lowerdirs,
// which is required for the overlay2 driver.
func SupportsOverlay(d string, checkMultipleLowers bool) error {
td, err := ioutil.TempDir(d, "check-overlayfs-support")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return err
}
}
mnt := filepath.Join(td, "merged")
lowerDir := path.Join(td, "lower2")
if checkMultipleLowers {
lowerDir += ":" + path.Join(td, "lower1")
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", mnt, "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
if err := unix.Unmount(mnt, 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err)
}
return nil
}
| // +build linux
package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils"
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/docker/docker/daemon/graphdriver"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
if backingFs == "extfs" {
msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support."
}
msg += " Backing filesystems without d_type support are not supported."
return graphdriver.NotSupportedError(msg)
}
// SupportsOverlay checks if the system supports overlay filesystem
// by performing an actual overlay mount.
//
// checkMultipleLowers parameter enables check for multiple lowerdirs,
// which is required for the overlay2 driver.
func SupportsOverlay(d string, checkMultipleLowers bool) error {
// We can't rely on go-selinux.GetEnabled() to detect whether SELinux is enabled,
// because RootlessKit doesn't mount /sys/fs/selinux in the child: https://github.com/rootless-containers/rootlesskit/issues/94
// So we check $_DOCKERD_ROOTLESS_SELINUX, which is set by dockerd-rootless.sh .
if os.Getenv("_DOCKERD_ROOTLESS_SELINUX") == "1" {
// Kernel 5.11 introduced support for rootless overlayfs, but incompatible with SELinux,
// so fallback to fuse-overlayfs.
// https://github.com/moby/moby/issues/42333
return errors.New("overlay is not supported for Rootless with SELinux")
}
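	// Probe for support by performing a real overlay mount in a throwaway directory under d.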
td, err := ioutil.TempDir(d, "check-overlayfs-support")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return err
}
}
mnt := filepath.Join(td, "merged")
lowerDir := path.Join(td, "lower2")
if checkMultipleLowers {
lowerDir += ":" + path.Join(td, "lower1")
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", mnt, "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
if err := unix.Unmount(mnt, 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err)
}
return nil
}
| AkihiroSuda | 59e8ebd7f313753726c8de380f72300ed07b402e | 20eb137e0a8512475ca5c66983dac85a650ca36f | There's no known fix yet? Is this a kernel bug? | thaJeztah | 4,790 |
moby/moby | 42,334 | rootless: disable overlay2 if running with SELinux | Contains https://github.com/moby/moby/pull/42199 (`dockerd-rootless.sh: avoid /run/xtables.lock EACCES on SELinux hosts`)
- - -
**- What I did**
Close #42333 (`rootless+overlay2 (kernel 5.11)+SELinux: mkdir /home/<USER>/.local/share/docker/overlay2/<CID>-init/merged/dev: permission denied. `)
**- How I did it**
Disable overlay2 when `$_DOCKERD_ROOTLESS_SELINUX` is set.
We can't rely on `go-selinux.GetEnabled()` because RootlessKit doesn't mount /sys/fs/selinux in the child: https://github.com/rootless-containers/rootlesskit/issues/94
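In short, `SupportsOverlay` gains an early return when that variable is set; the relevant hunk, excerpted from the diff below:

```go
// Excerpt from the updated SupportsOverlay (full file in the diff below).
// _DOCKERD_ROOTLESS_SELINUX is set by dockerd-rootless.sh, since SELinux cannot
// be detected via /sys/fs/selinux inside the RootlessKit mount namespace.
if os.Getenv("_DOCKERD_ROOTLESS_SELINUX") == "1" {
	// Rootless overlayfs (kernel 5.11+) is incompatible with SELinux,
	// so report "unsupported" and let the daemon fall back to fuse-overlayfs.
	return errors.New("overlay is not supported for Rootless with SELinux")
}
```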
**- How to verify it**
- Install on Fedora 34
- Make sure `docker info` shows `fuse-overlayfs` as the default storage driver on rootless, while retaining `overlay2` on rootful
- Make sure `docker run hello-world` works on rootless
**- Description for the changelog**
rootless: disable overlay2 if running with SELinux
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-04-28 07:02:08+00:00 | 2021-06-03 17:33:27+00:00 | daemon/graphdriver/overlayutils/overlayutils.go | // +build linux
package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils"
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/docker/docker/daemon/graphdriver"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
if backingFs == "extfs" {
msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support."
}
msg += " Backing filesystems without d_type support are not supported."
return graphdriver.NotSupportedError(msg)
}
// SupportsOverlay checks if the system supports overlay filesystem
// by performing an actual overlay mount.
//
// checkMultipleLowers parameter enables check for multiple lowerdirs,
// which is required for the overlay2 driver.
func SupportsOverlay(d string, checkMultipleLowers bool) error {
td, err := ioutil.TempDir(d, "check-overlayfs-support")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return err
}
}
mnt := filepath.Join(td, "merged")
lowerDir := path.Join(td, "lower2")
if checkMultipleLowers {
lowerDir += ":" + path.Join(td, "lower1")
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", mnt, "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
if err := unix.Unmount(mnt, 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err)
}
return nil
}
| // +build linux
package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils"
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"github.com/docker/docker/daemon/graphdriver"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
func ErrDTypeNotSupported(driver, backingFs string) error {
msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
if backingFs == "extfs" {
msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support."
}
msg += " Backing filesystems without d_type support are not supported."
return graphdriver.NotSupportedError(msg)
}
// SupportsOverlay checks if the system supports overlay filesystem
// by performing an actual overlay mount.
//
// checkMultipleLowers parameter enables check for multiple lowerdirs,
// which is required for the overlay2 driver.
func SupportsOverlay(d string, checkMultipleLowers bool) error {
// We can't rely on go-selinux.GetEnabled() to detect whether SELinux is enabled,
// because RootlessKit doesn't mount /sys/fs/selinux in the child: https://github.com/rootless-containers/rootlesskit/issues/94
// So we check $_DOCKERD_ROOTLESS_SELINUX, which is set by dockerd-rootless.sh .
if os.Getenv("_DOCKERD_ROOTLESS_SELINUX") == "1" {
// Kernel 5.11 introduced support for rootless overlayfs, but incompatible with SELinux,
// so fallback to fuse-overlayfs.
// https://github.com/moby/moby/issues/42333
return errors.New("overlay is not supported for Rootless with SELinux")
}
td, err := ioutil.TempDir(d, "check-overlayfs-support")
if err != nil {
return err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()
for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} {
if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {
return err
}
}
mnt := filepath.Join(td, "merged")
lowerDir := path.Join(td, "lower2")
if checkMultipleLowers {
lowerDir += ":" + path.Join(td, "lower1")
}
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, path.Join(td, "upper"), path.Join(td, "work"))
if err := unix.Mount("overlay", mnt, "overlay", 0, opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
if err := unix.Unmount(mnt, 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", mnt, err)
}
return nil
}
| AkihiroSuda | 59e8ebd7f313753726c8de380f72300ed07b402e | 20eb137e0a8512475ca5c66983dac85a650ca36f | I’m not sure, I guess we can revisit this after the release of Fedora 35 | AkihiroSuda | 4,791 |
moby/moby | 42,331 | hack/dind: fix cgroup v2 evacuation with `docker run --init` |
**- What I did**
Fix docker-library/docker#308
**- How I did it**
Evacuate all the processes in `/sys/fs/cgroup/cgroup.procs`, not just PID 1.
**- How to verify it**
Before:
```console
$ docker run --rm --privileged --init $(docker build -q .) cat /sys/fs/cgroup/cgroup.subtree_control
sed: couldn't flush stdout: Device or resource busy
```
After:
```console
$ docker run --rm --privileged --init $(docker build -q .) cat /sys/fs/cgroup/cgroup.subtree_control
cpuset cpu io memory hugetlb pids rdma
```
| null | 2021-04-27 09:03:20+00:00 | 2021-04-30 08:08:36+00:00 | hack/dind | #!/bin/sh
set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni <[email protected]>
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
export container=docker
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the init process (PID 1) from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
mkdir -p /sys/fs/cgroup/init
echo 1 > /sys/fs/cgroup/init/cgroup.procs
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
if [ $# -gt 0 ]; then
exec "$@"
fi
echo >&2 'ERROR: No command specified.'
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
| #!/bin/sh
set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni <[email protected]>
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
export container=docker
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the processes from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
# An error during moving non-existent process (i.e., "cat") is ignored.
mkdir -p /sys/fs/cgroup/init
xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
if [ $# -gt 0 ]; then
exec "$@"
fi
echo >&2 'ERROR: No command specified.'
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
| AkihiroSuda | dd3275c5f92948fe2488a330cfeaa8dcfaec42d0 | 59751bb23395a137253ca0b4fb99058ecec5a569 | Nice! :smile:
Doing a little code golf, I was able to have this work successfully with something like this: :innocent:
```suggestion
xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
```
Even starting a bunch of extra shell processes, it still seems to work properly:
```console
$ docker run -it --rm --name test --init --privileged docker:dind sh
/ # sh
/ # sh
/ # sh
/ # ps faux
PID USER TIME COMMAND
1 root 0:00 /sbin/docker-init -- dockerd-entrypoint.sh sh
8 root 0:00 sh
27 root 0:00 sh
28 root 0:00 sh
29 root 0:00 sh
30 root 0:00 ps faux
/ # mkdir -p /sys/fs/cgroup/init
/ # xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
/ # sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers > /sys/fs/cgroup/cgroup.subtree_control
/ # cat /sys/fs/cgroup/init/cgroup.procs
1
8
27
28
29
40
``` | tianon | 4,792 |
moby/moby | 42,331 | hack/dind: fix cgroup v2 evacuation with `docker run --init` |
**- What I did**
Fix docker-library/docker#308
**- How I did it**
Evacuate all the processes in `/sys/fs/cgroup/cgroup.procs`, not just PID 1.
**- How to verify it**
Before:
```console
$ docker run --rm --privileged --init $(docker build -q .) cat /sys/fs/cgroup/cgroup.subtree_control
sed: couldn't flush stdout: Device or resource busy
```
After:
```console
$ docker run --rm --privileged --init $(docker build -q .) cat /sys/fs/cgroup/cgroup.subtree_control
cpuset cpu io memory hugetlb pids rdma
```
| null | 2021-04-27 09:03:20+00:00 | 2021-04-30 08:08:36+00:00 | hack/dind | #!/bin/sh
set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni <[email protected]>
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
export container=docker
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the init process (PID 1) from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
mkdir -p /sys/fs/cgroup/init
echo 1 > /sys/fs/cgroup/init/cgroup.procs
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
if [ $# -gt 0 ]; then
exec "$@"
fi
echo >&2 'ERROR: No command specified.'
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
| #!/bin/sh
set -e
# DinD: a wrapper script which allows docker to be run inside a docker container.
# Original version by Jerome Petazzoni <[email protected]>
# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/
#
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
# Usage: dind CMD [ARG...]
# apparmor sucks and Docker needs to know that it's in a container (c) @tianon
export container=docker
if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then
mount -t securityfs none /sys/kernel/security || {
echo >&2 'Could not mount /sys/kernel/security.'
echo >&2 'AppArmor detection and --privileged mode might break.'
}
fi
# Mount /tmp (conditionally)
if ! mountpoint -q /tmp; then
mount -t tmpfs none /tmp
fi
# cgroup v2: enable nesting
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
# move the processes from the root group to the /init group,
# otherwise writing subtree_control fails with EBUSY.
# An error during moving non-existent process (i.e., "cat") is ignored.
mkdir -p /sys/fs/cgroup/init
xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
# enable controllers
sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
> /sys/fs/cgroup/cgroup.subtree_control
fi
if [ $# -gt 0 ]; then
exec "$@"
fi
echo >&2 'ERROR: No command specified.'
echo >&2 'You probably want to run hack/make.sh, or maybe a shell?'
| AkihiroSuda | dd3275c5f92948fe2488a330cfeaa8dcfaec42d0 | 59751bb23395a137253ca0b4fb99058ecec5a569 | updated, thanks | AkihiroSuda | 4,793 |
moby/moby | 42,330 | version: add RootlessKit, slirp4netns, and VPNKit version |
**- What I did**
Add RootlessKit info to the Version API.
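Concretely, each component is reported by appending a `types.ComponentVersion` entry; the new rootlesskit entry, excerpted from `fillRootlessVersion` in the diff below, looks like this:

```go
// Excerpt from fillRootlessVersion (full function in the diff below).
// rlInfo comes from the RootlessKit API client (rootless.GetRootlessKitClient).
v.Components = append(v.Components, types.ComponentVersion{
	Name:    "rootlesskit",
	Version: rlInfo.Version,
	Details: map[string]string{
		"ApiVersion":    rlInfo.APIVersion,
		"StateDir":      rlInfo.StateDir,
		"NetworkDriver": rlInfo.NetworkDriver.Driver,
		"PortDriver":    rlInfo.PortDriver.Driver,
	},
})
```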
**- How to verify it**
```console
$ docker --context=rootless version
Client:
Version: unknown-version
API version: 1.41
Go version: go1.16.3
Git commit: unknown-commit
Built: unknown-buildtime
OS/Arch: linux/amd64
Context: rootless
Experimental: true
Server:
Engine:
Version: library-import
API version: 1.41 (minimum version 1.12)
Go version: go1.16.3
Git commit: library-import
Built: library-import
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: v1.5.0
GitCommit: 8c906ff108ac28da23f69cc7b74f8e7a470d1df0
runc:
Version: 1.0.0-rc93+dev
GitCommit: c73d766ee6e1a24517545b0a71fe78d5f78d0449
docker-init:
Version: 0.19.0
GitCommit: de40ad0
rootlesskit:
Version: 0.14.2
ApiVersion: 1.1.1
NetworkDriver: slirp4netns
PortDriver: builtin
StateDir: /tmp/rootlesskit158605632
slirp4netns:
Version: 1.1.9
GitCommit: 4e37ea557562e0d7a64dc636eff156f64927335e
```
**- Description for the changelog**
version: add RootlessKit, slirp4netns, and VPNKit version
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-04-27 07:55:15+00:00 | 2022-03-22 17:27:08+00:00 | daemon/info_unix.go | //go:build !windows
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fillPlatformInfo fills the platform related info.
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
v.CgroupDriver = daemon.getCgroupDriver()
v.CgroupVersion = "1"
if sysInfo.CgroupUnified {
v.CgroupVersion = "2"
}
if v.CgroupDriver != cgroupNoneDriver {
v.MemoryLimit = sysInfo.MemoryLimit
v.SwapLimit = sysInfo.SwapLimit
v.KernelMemory = sysInfo.KernelMemory
v.KernelMemoryTCP = sysInfo.KernelMemoryTCP
v.OomKillDisable = sysInfo.OomKillDisable
v.CPUCfsPeriod = sysInfo.CPUCfs
v.CPUCfsQuota = sysInfo.CPUCfs
v.CPUShares = sysInfo.CPUShares
v.CPUSet = sysInfo.Cpuset
v.PidsLimit = sysInfo.PidsLimit
}
v.Runtimes = daemon.configStore.GetAllRuntimes()
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
v.InitBinary = daemon.configStore.GetInitPath()
v.RuncCommit.ID = "N/A"
v.ContainerdCommit.ID = "N/A"
v.InitCommit.ID = "N/A"
defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.RuncCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.ContainerdCommit.ID = rv.Revision
} else {
logrus.Warnf("failed to retrieve containerd version: %v", err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if _, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.InitCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
// Set expected and actual commits to the same value to prevent the client
// showing that the version does not match the "expected" version/commit.
v.RuncCommit.Expected = v.RuncCommit.ID
v.ContainerdCommit.Expected = v.ContainerdCommit.ID
v.InitCommit.Expected = v.InitCommit.ID
if v.CgroupDriver == cgroupNoneDriver {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.")
} else {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.")
}
} else {
if !v.MemoryLimit {
v.Warnings = append(v.Warnings, "WARNING: No memory limit support")
}
if !v.SwapLimit {
v.Warnings = append(v.Warnings, "WARNING: No swap limit support")
}
if !v.KernelMemoryTCP && v.CgroupVersion == "1" {
// kernel memory is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support")
}
if !v.OomKillDisable && v.CgroupVersion == "1" {
// oom kill disable is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support")
}
if !v.CPUCfsQuota {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support")
}
if !v.CPUCfsPeriod {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support")
}
if !v.CPUShares {
v.Warnings = append(v.Warnings, "WARNING: No cpu shares support")
}
if !v.CPUSet {
v.Warnings = append(v.Warnings, "WARNING: No cpuset support")
}
// TODO add fields for these options in types.Info
if !sysInfo.BlkioWeight && v.CgroupVersion == "2" {
// blkio weight is not available on cgroup v1 since kernel 5.0.
// Warning is not printed on cgroup v1, because there is no action user can take.
// On cgroup v2, blkio weight is implemented using io.weight
v.Warnings = append(v.Warnings, "WARNING: No io.weight support")
}
if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support")
}
if !sysInfo.BlkioReadBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support")
}
}
if !sysInfo.BlkioWriteBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support")
}
}
if !sysInfo.BlkioReadIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support")
}
}
if !sysInfo.BlkioWriteIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support")
}
}
}
if !v.IPv4Forwarding {
v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled")
}
if !v.BridgeNfIptables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled")
}
if !v.BridgeNfIP6tables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "containerd",
Version: rv.Version,
Details: map[string]string{
"GitCommit": rv.Revision,
},
})
}
defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: defaultRuntime,
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if ver, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: filepath.Base(defaultInitBinary),
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
}
func fillDriverWarnings(v *types.Info) {
for _, pair := range v.DriverStatus {
if pair[0] == "Data loop file" {
msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+
"strongly discouraged for production use.\n "+
"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver)
v.Warnings = append(v.Warnings, msg)
continue
}
if pair[0] == "Supports d_type" && pair[1] == "false" {
backingFs := getBackingFs(v)
msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n"
}
msg += " Running without d_type support will not be supported in future releases."
v.Warnings = append(v.Warnings, msg)
continue
}
}
}
func getBackingFs(v *types.Info) string {
for _, pair := range v.DriverStatus {
if pair[0] == "Backing Filesystem" {
return pair[1]
}
}
return ""
}
// parseInitVersion parses a Tini version string, and extracts the "version"
// and "git commit" from the output.
//
// Output example from `docker-init --version`:
//
// tini version 0.18.0 - git.fec3683
func parseInitVersion(v string) (version string, commit string, err error) {
parts := strings.Split(v, " - ")
if len(parts) >= 2 {
gitParts := strings.Split(strings.TrimSpace(parts[1]), ".")
if len(gitParts) == 2 && gitParts[0] == "git" {
commit = gitParts[1]
}
}
parts[0] = strings.TrimSpace(parts[0])
if strings.HasPrefix(parts[0], "tini version ") {
version = strings.TrimPrefix(parts[0], "tini version ")
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return version, commit, err
}
// parseRuntimeVersion parses the output of `[runtime] --version` and extracts the
// "name", "version" and "git commit" from the output.
//
// Output example from `runc --version`:
//
// runc version 1.0.0-rc5+dev
// commit: 69663f0bd4b60df09991c08812a60108003fa340
// spec: 1.0.0
func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
lines := strings.Split(strings.TrimSpace(v), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
s := strings.Split(line, "version")
runtime = strings.TrimSpace(s[0])
version = strings.TrimSpace(s[len(s)-1])
continue
}
if strings.HasPrefix(line, "commit:") {
commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:"))
continue
}
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return runtime, version, commit, err
}
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
}
// Rootless returns true if daemon is running in rootless mode
func (daemon *Daemon) Rootless() bool {
return daemon.configStore.Rootless
}
| //go:build !windows
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fillPlatformInfo fills the platform related info.
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
v.CgroupDriver = daemon.getCgroupDriver()
v.CgroupVersion = "1"
if sysInfo.CgroupUnified {
v.CgroupVersion = "2"
}
if v.CgroupDriver != cgroupNoneDriver {
v.MemoryLimit = sysInfo.MemoryLimit
v.SwapLimit = sysInfo.SwapLimit
v.KernelMemory = sysInfo.KernelMemory
v.KernelMemoryTCP = sysInfo.KernelMemoryTCP
v.OomKillDisable = sysInfo.OomKillDisable
v.CPUCfsPeriod = sysInfo.CPUCfs
v.CPUCfsQuota = sysInfo.CPUCfs
v.CPUShares = sysInfo.CPUShares
v.CPUSet = sysInfo.Cpuset
v.PidsLimit = sysInfo.PidsLimit
}
v.Runtimes = daemon.configStore.GetAllRuntimes()
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
v.InitBinary = daemon.configStore.GetInitPath()
v.RuncCommit.ID = "N/A"
v.ContainerdCommit.ID = "N/A"
v.InitCommit.ID = "N/A"
defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.RuncCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.ContainerdCommit.ID = rv.Revision
} else {
logrus.Warnf("failed to retrieve containerd version: %v", err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if _, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.InitCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
// Set expected and actual commits to the same value to prevent the client
// showing that the version does not match the "expected" version/commit.
v.RuncCommit.Expected = v.RuncCommit.ID
v.ContainerdCommit.Expected = v.ContainerdCommit.ID
v.InitCommit.Expected = v.InitCommit.ID
if v.CgroupDriver == cgroupNoneDriver {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.")
} else {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.")
}
} else {
if !v.MemoryLimit {
v.Warnings = append(v.Warnings, "WARNING: No memory limit support")
}
if !v.SwapLimit {
v.Warnings = append(v.Warnings, "WARNING: No swap limit support")
}
if !v.KernelMemoryTCP && v.CgroupVersion == "1" {
// kernel memory is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support")
}
if !v.OomKillDisable && v.CgroupVersion == "1" {
// oom kill disable is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support")
}
if !v.CPUCfsQuota {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support")
}
if !v.CPUCfsPeriod {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support")
}
if !v.CPUShares {
v.Warnings = append(v.Warnings, "WARNING: No cpu shares support")
}
if !v.CPUSet {
v.Warnings = append(v.Warnings, "WARNING: No cpuset support")
}
// TODO add fields for these options in types.Info
if !sysInfo.BlkioWeight && v.CgroupVersion == "2" {
// blkio weight is not available on cgroup v1 since kernel 5.0.
// Warning is not printed on cgroup v1, because there is no action user can take.
// On cgroup v2, blkio weight is implemented using io.weight
v.Warnings = append(v.Warnings, "WARNING: No io.weight support")
}
if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support")
}
if !sysInfo.BlkioReadBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support")
}
}
if !sysInfo.BlkioWriteBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support")
}
}
if !sysInfo.BlkioReadIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support")
}
}
if !sysInfo.BlkioWriteIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support")
}
}
}
if !v.IPv4Forwarding {
v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled")
}
if !v.BridgeNfIptables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled")
}
if !v.BridgeNfIP6tables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "containerd",
Version: rv.Version,
Details: map[string]string{
"GitCommit": rv.Revision,
},
})
}
defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: defaultRuntime,
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if ver, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: filepath.Base(defaultInitBinary),
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
daemon.fillRootlessVersion(v)
}
func (daemon *Daemon) fillRootlessVersion(v *types.Version) {
if !rootless.RunningWithRootlessKit() {
return
}
rlc, err := rootless.GetRootlessKitClient()
if err != nil {
logrus.Warnf("failed to create RootlessKit client: %v", err)
return
}
rlInfo, err := rlc.Info(context.TODO())
if err != nil {
logrus.Warnf("failed to retrieve RootlessKit version: %v", err)
return
}
v.Components = append(v.Components, types.ComponentVersion{
Name: "rootlesskit",
Version: rlInfo.Version,
Details: map[string]string{
"ApiVersion": rlInfo.APIVersion,
"StateDir": rlInfo.StateDir,
"NetworkDriver": rlInfo.NetworkDriver.Driver,
"PortDriver": rlInfo.PortDriver.Driver,
},
})
switch rlInfo.NetworkDriver.Driver {
case "slirp4netns":
if rv, err := exec.Command("slirp4netns", "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse slirp4netns version: %v", err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: "slirp4netns",
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve slirp4netns version: %v", err)
}
case "vpnkit":
if rv, err := exec.Command("vpnkit", "--version").Output(); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "vpnkit",
Version: strings.TrimSpace(string(rv)),
})
} else {
logrus.Warnf("failed to retrieve vpnkit version: %v", err)
}
}
}
func fillDriverWarnings(v *types.Info) {
for _, pair := range v.DriverStatus {
if pair[0] == "Data loop file" {
msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+
"strongly discouraged for production use.\n "+
"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver)
v.Warnings = append(v.Warnings, msg)
continue
}
if pair[0] == "Supports d_type" && pair[1] == "false" {
backingFs := getBackingFs(v)
msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n"
}
msg += " Running without d_type support will not be supported in future releases."
v.Warnings = append(v.Warnings, msg)
continue
}
}
}
func getBackingFs(v *types.Info) string {
for _, pair := range v.DriverStatus {
if pair[0] == "Backing Filesystem" {
return pair[1]
}
}
return ""
}
// parseInitVersion parses a Tini version string, and extracts the "version"
// and "git commit" from the output.
//
// Output example from `docker-init --version`:
//
// tini version 0.18.0 - git.fec3683
func parseInitVersion(v string) (version string, commit string, err error) {
parts := strings.Split(v, " - ")
if len(parts) >= 2 {
gitParts := strings.Split(strings.TrimSpace(parts[1]), ".")
if len(gitParts) == 2 && gitParts[0] == "git" {
commit = gitParts[1]
}
}
parts[0] = strings.TrimSpace(parts[0])
if strings.HasPrefix(parts[0], "tini version ") {
version = strings.TrimPrefix(parts[0], "tini version ")
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return version, commit, err
}
// parseRuntimeVersion parses the output of `[runtime] --version` and extracts the
// "name", "version" and "git commit" from the output.
//
// Output example from `runc --version`:
//
// runc version 1.0.0-rc5+dev
// commit: 69663f0bd4b60df09991c08812a60108003fa340
// spec: 1.0.0
func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
lines := strings.Split(strings.TrimSpace(v), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
s := strings.Split(line, "version")
runtime = strings.TrimSpace(s[0])
version = strings.TrimSpace(s[len(s)-1])
continue
}
if strings.HasPrefix(line, "commit:") {
commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:"))
continue
}
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return runtime, version, commit, err
}
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
}
// Rootless returns true if daemon is running in rootless mode
func (daemon *Daemon) Rootless() bool {
return daemon.configStore.Rootless
}
| AkihiroSuda | 301eba7fa4871862fbc00753c4accfc535e0eedf | 6b9b445af6c7908992632cff6c30cbf6a4c617ac | Do you think we should use a `sync.Once` for these? (probably ok for a follow-up) | thaJeztah | 4,794 |
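A minimal sketch of what the suggested `sync.Once` caching could look like, kept next to the existing helpers in daemon/info_unix.go. This is illustrative only — the suggestion was deferred to a follow-up — and the names below are made up; it assumes `"sync"` is added to the file's imports:

```go
// Hypothetical follow-up (not part of this PR): run the exec-based version
// lookup once per daemon lifetime instead of on every /version request.
var (
	slirp4netnsOnce      sync.Once
	slirp4netnsComponent types.ComponentVersion
	slirp4netnsErr       error
)

func cachedSlirp4netnsVersion() (types.ComponentVersion, error) {
	slirp4netnsOnce.Do(func() {
		rv, err := exec.Command("slirp4netns", "--version").Output()
		if err != nil {
			slirp4netnsErr = err
			return
		}
		// parseRuntimeVersion is the existing helper in this file.
		_, ver, commit, err := parseRuntimeVersion(string(rv))
		if err != nil {
			slirp4netnsErr = err
			return
		}
		slirp4netnsComponent = types.ComponentVersion{
			Name:    "slirp4netns",
			Version: ver,
			Details: map[string]string{"GitCommit": commit},
		}
	})
	return slirp4netnsComponent, slirp4netnsErr
}
```

One caveat with caching is that the reported version would become stale if the binary is upgraded while the daemon keeps running, which is probably why this was left for a follow-up discussion.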
moby/moby | 42,330 | version: add RootlessKit, slirp4netns, and VPNKit version |
**- What I did**
Add RootlessKit info to the Version API.
**- How to verify it**
```console
$ docker --context=rootless version
Client:
Version: unknown-version
API version: 1.41
Go version: go1.16.3
Git commit: unknown-commit
Built: unknown-buildtime
OS/Arch: linux/amd64
Context: rootless
Experimental: true
Server:
Engine:
Version: library-import
API version: 1.41 (minimum version 1.12)
Go version: go1.16.3
Git commit: library-import
Built: library-import
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: v1.5.0
GitCommit: 8c906ff108ac28da23f69cc7b74f8e7a470d1df0
runc:
Version: 1.0.0-rc93+dev
GitCommit: c73d766ee6e1a24517545b0a71fe78d5f78d0449
docker-init:
Version: 0.19.0
GitCommit: de40ad0
rootlesskit:
Version: 0.14.2
ApiVersion: 1.1.1
NetworkDriver: slirp4netns
PortDriver: builtin
StateDir: /tmp/rootlesskit158605632
slirp4netns:
Version: 1.1.9
GitCommit: 4e37ea557562e0d7a64dc636eff156f64927335e
```
**- Description for the changelog**
version: add RootlessKit, slirp4netns, and VPNKit version
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
| null | 2021-04-27 07:55:15+00:00 | 2022-03-22 17:27:08+00:00 | daemon/info_unix.go | //go:build !windows
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fillPlatformInfo fills the platform related info.
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
v.CgroupDriver = daemon.getCgroupDriver()
v.CgroupVersion = "1"
if sysInfo.CgroupUnified {
v.CgroupVersion = "2"
}
if v.CgroupDriver != cgroupNoneDriver {
v.MemoryLimit = sysInfo.MemoryLimit
v.SwapLimit = sysInfo.SwapLimit
v.KernelMemory = sysInfo.KernelMemory
v.KernelMemoryTCP = sysInfo.KernelMemoryTCP
v.OomKillDisable = sysInfo.OomKillDisable
v.CPUCfsPeriod = sysInfo.CPUCfs
v.CPUCfsQuota = sysInfo.CPUCfs
v.CPUShares = sysInfo.CPUShares
v.CPUSet = sysInfo.Cpuset
v.PidsLimit = sysInfo.PidsLimit
}
v.Runtimes = daemon.configStore.GetAllRuntimes()
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
v.InitBinary = daemon.configStore.GetInitPath()
v.RuncCommit.ID = "N/A"
v.ContainerdCommit.ID = "N/A"
v.InitCommit.ID = "N/A"
defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.RuncCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.ContainerdCommit.ID = rv.Revision
} else {
logrus.Warnf("failed to retrieve containerd version: %v", err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if _, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.InitCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
// Set expected and actual commits to the same value to prevent the client
// showing that the version does not match the "expected" version/commit.
v.RuncCommit.Expected = v.RuncCommit.ID
v.ContainerdCommit.Expected = v.ContainerdCommit.ID
v.InitCommit.Expected = v.InitCommit.ID
if v.CgroupDriver == cgroupNoneDriver {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.")
} else {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.")
}
} else {
if !v.MemoryLimit {
v.Warnings = append(v.Warnings, "WARNING: No memory limit support")
}
if !v.SwapLimit {
v.Warnings = append(v.Warnings, "WARNING: No swap limit support")
}
if !v.KernelMemoryTCP && v.CgroupVersion == "1" {
// kernel memory is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support")
}
if !v.OomKillDisable && v.CgroupVersion == "1" {
// oom kill disable is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support")
}
if !v.CPUCfsQuota {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support")
}
if !v.CPUCfsPeriod {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support")
}
if !v.CPUShares {
v.Warnings = append(v.Warnings, "WARNING: No cpu shares support")
}
if !v.CPUSet {
v.Warnings = append(v.Warnings, "WARNING: No cpuset support")
}
// TODO add fields for these options in types.Info
if !sysInfo.BlkioWeight && v.CgroupVersion == "2" {
// blkio weight is not available on cgroup v1 since kernel 5.0.
// Warning is not printed on cgroup v1, because there is no action user can take.
// On cgroup v2, blkio weight is implemented using io.weight
v.Warnings = append(v.Warnings, "WARNING: No io.weight support")
}
if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support")
}
if !sysInfo.BlkioReadBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support")
}
}
if !sysInfo.BlkioWriteBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support")
}
}
if !sysInfo.BlkioReadIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support")
}
}
if !sysInfo.BlkioWriteIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support")
}
}
}
if !v.IPv4Forwarding {
v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled")
}
if !v.BridgeNfIptables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled")
}
if !v.BridgeNfIP6tables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "containerd",
Version: rv.Version,
Details: map[string]string{
"GitCommit": rv.Revision,
},
})
}
defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: defaultRuntime,
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if ver, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: filepath.Base(defaultInitBinary),
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
}
func fillDriverWarnings(v *types.Info) {
for _, pair := range v.DriverStatus {
if pair[0] == "Data loop file" {
msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+
"strongly discouraged for production use.\n "+
"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver)
v.Warnings = append(v.Warnings, msg)
continue
}
if pair[0] == "Supports d_type" && pair[1] == "false" {
backingFs := getBackingFs(v)
msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n"
}
msg += " Running without d_type support will not be supported in future releases."
v.Warnings = append(v.Warnings, msg)
continue
}
}
}
func getBackingFs(v *types.Info) string {
for _, pair := range v.DriverStatus {
if pair[0] == "Backing Filesystem" {
return pair[1]
}
}
return ""
}
// parseInitVersion parses a Tini version string, and extracts the "version"
// and "git commit" from the output.
//
// Output example from `docker-init --version`:
//
// tini version 0.18.0 - git.fec3683
func parseInitVersion(v string) (version string, commit string, err error) {
parts := strings.Split(v, " - ")
if len(parts) >= 2 {
gitParts := strings.Split(strings.TrimSpace(parts[1]), ".")
if len(gitParts) == 2 && gitParts[0] == "git" {
commit = gitParts[1]
}
}
parts[0] = strings.TrimSpace(parts[0])
if strings.HasPrefix(parts[0], "tini version ") {
version = strings.TrimPrefix(parts[0], "tini version ")
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return version, commit, err
}
// parseRuntimeVersion parses the output of `[runtime] --version` and extracts the
// "name", "version" and "git commit" from the output.
//
// Output example from `runc --version`:
//
// runc version 1.0.0-rc5+dev
// commit: 69663f0bd4b60df09991c08812a60108003fa340
// spec: 1.0.0
func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
lines := strings.Split(strings.TrimSpace(v), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
s := strings.Split(line, "version")
runtime = strings.TrimSpace(s[0])
version = strings.TrimSpace(s[len(s)-1])
continue
}
if strings.HasPrefix(line, "commit:") {
commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:"))
continue
}
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return runtime, version, commit, err
}
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
}
// Rootless returns true if daemon is running in rootless mode
func (daemon *Daemon) Rootless() bool {
return daemon.configStore.Rootless
}
| //go:build !windows
// +build !windows
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/rootless"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fillPlatformInfo fills the platform related info.
func (daemon *Daemon) fillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) {
v.CgroupDriver = daemon.getCgroupDriver()
v.CgroupVersion = "1"
if sysInfo.CgroupUnified {
v.CgroupVersion = "2"
}
if v.CgroupDriver != cgroupNoneDriver {
v.MemoryLimit = sysInfo.MemoryLimit
v.SwapLimit = sysInfo.SwapLimit
v.KernelMemory = sysInfo.KernelMemory
v.KernelMemoryTCP = sysInfo.KernelMemoryTCP
v.OomKillDisable = sysInfo.OomKillDisable
v.CPUCfsPeriod = sysInfo.CPUCfs
v.CPUCfsQuota = sysInfo.CPUCfs
v.CPUShares = sysInfo.CPUShares
v.CPUSet = sysInfo.Cpuset
v.PidsLimit = sysInfo.PidsLimit
}
v.Runtimes = daemon.configStore.GetAllRuntimes()
v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
v.InitBinary = daemon.configStore.GetInitPath()
v.RuncCommit.ID = "N/A"
v.ContainerdCommit.ID = "N/A"
v.InitCommit.ID = "N/A"
defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, _, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.RuncCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.ContainerdCommit.ID = rv.Revision
} else {
logrus.Warnf("failed to retrieve containerd version: %v", err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if _, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.InitCommit.ID = commit
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
// Set expected and actual commits to the same value to prevent the client
// showing that the version does not match the "expected" version/commit.
v.RuncCommit.Expected = v.RuncCommit.ID
v.ContainerdCommit.Expected = v.ContainerdCommit.ID
v.InitCommit.Expected = v.InitCommit.ID
if v.CgroupDriver == cgroupNoneDriver {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. Systemd is required to enable cgroups in rootless-mode.")
} else {
v.Warnings = append(v.Warnings, "WARNING: Running in rootless-mode without cgroups. To enable cgroups in rootless-mode, you need to boot the system in cgroup v2 mode.")
}
} else {
if !v.MemoryLimit {
v.Warnings = append(v.Warnings, "WARNING: No memory limit support")
}
if !v.SwapLimit {
v.Warnings = append(v.Warnings, "WARNING: No swap limit support")
}
if !v.KernelMemoryTCP && v.CgroupVersion == "1" {
// kernel memory is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No kernel memory TCP limit support")
}
if !v.OomKillDisable && v.CgroupVersion == "1" {
// oom kill disable is not available for cgroup v2.
// Warning is not printed on cgroup v2, because there is no action user can take.
v.Warnings = append(v.Warnings, "WARNING: No oom kill disable support")
}
if !v.CPUCfsQuota {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs quota support")
}
if !v.CPUCfsPeriod {
v.Warnings = append(v.Warnings, "WARNING: No cpu cfs period support")
}
if !v.CPUShares {
v.Warnings = append(v.Warnings, "WARNING: No cpu shares support")
}
if !v.CPUSet {
v.Warnings = append(v.Warnings, "WARNING: No cpuset support")
}
// TODO add fields for these options in types.Info
if !sysInfo.BlkioWeight && v.CgroupVersion == "2" {
// blkio weight is not available on cgroup v1 since kernel 5.0.
// Warning is not printed on cgroup v1, because there is no action user can take.
// On cgroup v2, blkio weight is implemented using io.weight
v.Warnings = append(v.Warnings, "WARNING: No io.weight support")
}
if !sysInfo.BlkioWeightDevice && v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.weight (per device) support")
}
if !sysInfo.BlkioReadBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (rbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_bps_device support")
}
}
if !sysInfo.BlkioWriteBpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wbps) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_bps_device support")
}
}
if !sysInfo.BlkioReadIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (riops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.read_iops_device support")
}
}
if !sysInfo.BlkioWriteIOpsDevice {
if v.CgroupVersion == "2" {
v.Warnings = append(v.Warnings, "WARNING: No io.max (wiops) support")
} else {
v.Warnings = append(v.Warnings, "WARNING: No blkio throttle.write_iops_device support")
}
}
}
if !v.IPv4Forwarding {
v.Warnings = append(v.Warnings, "WARNING: IPv4 forwarding is disabled")
}
if !v.BridgeNfIptables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-iptables is disabled")
}
if !v.BridgeNfIP6tables {
v.Warnings = append(v.Warnings, "WARNING: bridge-nf-call-ip6tables is disabled")
}
}
func (daemon *Daemon) fillPlatformVersion(v *types.Version) {
if rv, err := daemon.containerd.Version(context.Background()); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "containerd",
Version: rv.Version,
Details: map[string]string{
"GitCommit": rv.Revision,
},
})
}
defaultRuntime := daemon.configStore.GetDefaultRuntimeName()
defaultRuntimeBinary := daemon.configStore.GetRuntime(defaultRuntime).Path
if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %v", defaultRuntimeBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: defaultRuntime,
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err)
}
defaultInitBinary := daemon.configStore.GetInitPath()
if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
if ver, commit, err := parseInitVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse %s version: %s", defaultInitBinary, err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: filepath.Base(defaultInitBinary),
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err)
}
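	// When the daemon runs under RootlessKit, also report RootlessKit and its
	// network/port drivers as components; this is a no-op otherwise.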
daemon.fillRootlessVersion(v)
}
func (daemon *Daemon) fillRootlessVersion(v *types.Version) {
if !rootless.RunningWithRootlessKit() {
return
}
rlc, err := rootless.GetRootlessKitClient()
if err != nil {
logrus.Warnf("failed to create RootlessKit client: %v", err)
return
}
rlInfo, err := rlc.Info(context.TODO())
if err != nil {
logrus.Warnf("failed to retrieve RootlessKit version: %v", err)
return
}
v.Components = append(v.Components, types.ComponentVersion{
Name: "rootlesskit",
Version: rlInfo.Version,
Details: map[string]string{
"ApiVersion": rlInfo.APIVersion,
"StateDir": rlInfo.StateDir,
"NetworkDriver": rlInfo.NetworkDriver.Driver,
"PortDriver": rlInfo.PortDriver.Driver,
},
})
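	// Best effort: also report the version of the network driver binary
	// (slirp4netns or vpnkit) that RootlessKit is configured to use.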
switch rlInfo.NetworkDriver.Driver {
case "slirp4netns":
if rv, err := exec.Command("slirp4netns", "--version").Output(); err == nil {
if _, ver, commit, err := parseRuntimeVersion(string(rv)); err != nil {
logrus.Warnf("failed to parse slirp4netns version: %v", err)
} else {
v.Components = append(v.Components, types.ComponentVersion{
Name: "slirp4netns",
Version: ver,
Details: map[string]string{
"GitCommit": commit,
},
})
}
} else {
logrus.Warnf("failed to retrieve slirp4netns version: %v", err)
}
case "vpnkit":
if rv, err := exec.Command("vpnkit", "--version").Output(); err == nil {
v.Components = append(v.Components, types.ComponentVersion{
Name: "vpnkit",
Version: strings.TrimSpace(string(rv)),
})
} else {
logrus.Warnf("failed to retrieve vpnkit version: %v", err)
}
}
}
func fillDriverWarnings(v *types.Info) {
for _, pair := range v.DriverStatus {
if pair[0] == "Data loop file" {
msg := fmt.Sprintf("WARNING: %s: usage of loopback devices is "+
"strongly discouraged for production use.\n "+
"Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.", v.Driver)
v.Warnings = append(v.Warnings, msg)
continue
}
if pair[0] == "Supports d_type" && pair[1] == "false" {
backingFs := getBackingFs(v)
msg := fmt.Sprintf("WARNING: %s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.\n", v.Driver, backingFs)
if backingFs == "xfs" {
msg += " Reformat the filesystem with ftype=1 to enable d_type support.\n"
}
msg += " Running without d_type support will not be supported in future releases."
v.Warnings = append(v.Warnings, msg)
continue
}
}
}
func getBackingFs(v *types.Info) string {
for _, pair := range v.DriverStatus {
if pair[0] == "Backing Filesystem" {
return pair[1]
}
}
return ""
}
// parseInitVersion parses a Tini version string, and extracts the "version"
// and "git commit" from the output.
//
// Output example from `docker-init --version`:
//
// tini version 0.18.0 - git.fec3683
func parseInitVersion(v string) (version string, commit string, err error) {
parts := strings.Split(v, " - ")
if len(parts) >= 2 {
gitParts := strings.Split(strings.TrimSpace(parts[1]), ".")
if len(gitParts) == 2 && gitParts[0] == "git" {
commit = gitParts[1]
}
}
parts[0] = strings.TrimSpace(parts[0])
if strings.HasPrefix(parts[0], "tini version ") {
version = strings.TrimPrefix(parts[0], "tini version ")
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return version, commit, err
}
// parseRuntimeVersion parses the output of `[runtime] --version` and extracts the
// "name", "version" and "git commit" from the output.
//
// Output example from `runc --version`:
//
// runc version 1.0.0-rc5+dev
// commit: 69663f0bd4b60df09991c08812a60108003fa340
// spec: 1.0.0
func parseRuntimeVersion(v string) (runtime string, version string, commit string, err error) {
lines := strings.Split(strings.TrimSpace(v), "\n")
for _, line := range lines {
if strings.Contains(line, "version") {
s := strings.Split(line, "version")
runtime = strings.TrimSpace(s[0])
version = strings.TrimSpace(s[len(s)-1])
continue
}
if strings.HasPrefix(line, "commit:") {
commit = strings.TrimSpace(strings.TrimPrefix(line, "commit:"))
continue
}
}
if version == "" && commit == "" {
err = errors.Errorf("unknown output format: %s", v)
}
return runtime, version, commit, err
}
func (daemon *Daemon) cgroupNamespacesEnabled(sysInfo *sysinfo.SysInfo) bool {
return sysInfo.CgroupNamespaces && containertypes.CgroupnsMode(daemon.configStore.CgroupNamespaceMode).IsPrivate()
}
// Rootless returns true if daemon is running in rootless mode
func (daemon *Daemon) Rootless() bool {
return daemon.configStore.Rootless
}
| AkihiroSuda | 301eba7fa4871862fbc00753c4accfc535e0eedf | 6b9b445af6c7908992632cff6c30cbf6a4c617ac | RunningWithRootlessKit already has sync.Once: https://github.com/moby/moby/blob/33c332ad1982019cb9250ed0163ee2ed4955f596/rootless/rootless.go#L20
Calling the RootlessKit API should not be wrapped in a `sync.Once`.
| AkihiroSuda | 4,795 |
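Not part of the PR itself: a minimal, hypothetical sketch of the pattern described in the comment above, assuming the RootlessKit API client package that the diff below vendors (`github.com/rootless-containers/rootlesskit/pkg/api/client`). The inexpensive environment-variable check is memoized with `sync.Once`, because it cannot change for the lifetime of the process, while the RootlessKit API is queried on every call so the reported information is never stale. All names here are invented for illustration.

```go
package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/rootless-containers/rootlesskit/pkg/api/client"
)

var (
	isRootless     bool
	isRootlessOnce sync.Once
)

// runningRootless memoizes the cheap environment check; the result cannot
// change while the process is running, so sync.Once fits here.
func runningRootless() bool {
	isRootlessOnce.Do(func() {
		isRootless = os.Getenv("ROOTLESSKIT_STATE_DIR") != ""
	})
	return isRootless
}

// rootlessKitVersion queries the RootlessKit API socket on every call and is
// deliberately not cached, matching the suggestion in the comment above.
func rootlessKitVersion(ctx context.Context) (string, error) {
	stateDir := os.Getenv("ROOTLESSKIT_STATE_DIR")
	c, err := client.New(filepath.Join(stateDir, "api.sock"))
	if err != nil {
		return "", err
	}
	info, err := c.Info(ctx)
	if err != nil {
		return "", err
	}
	return info.Version, nil
}

func main() {
	if !runningRootless() {
		fmt.Println("not running under RootlessKit")
		return
	}
	ver, err := rootlessKitVersion(context.Background())
	if err != nil {
		fmt.Fprintln(os.Stderr, "failed to query RootlessKit:", err)
		return
	}
	fmt.Println("RootlessKit version:", ver)
}
```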
moby/moby | 42,330 | version: add RootlessKit, slirp4netns, and VPNKit version |
**- What I did**
Add RootlessKit info to the Version API.
**- How to verify it**
```console
$ docker --context=rootless version
Client:
Version: unknown-version
API version: 1.41
Go version: go1.16.3
Git commit: unknown-commit
Built: unknown-buildtime
OS/Arch: linux/amd64
Context: rootless
Experimental: true
Server:
Engine:
Version: library-import
API version: 1.41 (minimum version 1.12)
Go version: go1.16.3
Git commit: library-import
Built: library-import
OS/Arch: linux/amd64
Experimental: false
containerd:
Version: v1.5.0
GitCommit: 8c906ff108ac28da23f69cc7b74f8e7a470d1df0
runc:
Version: 1.0.0-rc93+dev
GitCommit: c73d766ee6e1a24517545b0a71fe78d5f78d0449
docker-init:
Version: 0.19.0
GitCommit: de40ad0
rootlesskit:
Version: 0.14.2
ApiVersion: 1.1.1
NetworkDriver: slirp4netns
PortDriver: builtin
StateDir: /tmp/rootlesskit158605632
slirp4netns:
Version: 1.1.9
GitCommit: 4e37ea557562e0d7a64dc636eff156f64927335e
```
**- Description for the changelog**
version: add RootlessKit, slirp4netns, and VPNKit version
**- A picture of a cute animal (not mandatory but encouraged)**
:penguin:
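Not from the PR: a small sketch of how the component list shown in the `docker version` output quoted above can be read programmatically, assuming the Go client from `github.com/docker/docker/client`. The `rootlesskit`, `slirp4netns`, and `vpnkit` entries added by this PR appear as ordinary entries in `Components`, next to `containerd`, `runc`, and `docker-init`.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Connect to the daemon selected by the environment (DOCKER_HOST and friends).
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	v, err := cli.ServerVersion(context.Background())
	if err != nil {
		panic(err)
	}

	// Each engine component is reported as a ComponentVersion with an
	// optional Details map (GitCommit, ApiVersion, NetworkDriver, ...).
	for _, c := range v.Components {
		fmt.Printf("%s: %s\n", c.Name, c.Version)
		for k, val := range c.Details {
			fmt.Printf("  %s: %s\n", k, val)
		}
	}
}
```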
| null | 2021-04-27 07:55:15+00:00 | 2022-03-22 17:27:08+00:00 | rootless/rootless.go | package rootless // import "github.com/docker/docker/rootless"
import (
"os"
"sync"
)
const (
// RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy
RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy"
)
var (
runningWithRootlessKit bool
runningWithRootlessKitOnce sync.Once
)
// RunningWithRootlessKit returns true if running under RootlessKit namespaces.
func RunningWithRootlessKit() bool {
runningWithRootlessKitOnce.Do(func() {
u := os.Getenv("ROOTLESSKIT_STATE_DIR")
runningWithRootlessKit = u != ""
})
return runningWithRootlessKit
}
| package rootless // import "github.com/docker/docker/rootless"
import (
"os"
"path/filepath"
"sync"
"github.com/pkg/errors"
"github.com/rootless-containers/rootlesskit/pkg/api/client"
)
const (
// RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy
RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy"
)
var (
runningWithRootlessKit bool
runningWithRootlessKitOnce sync.Once
)
// RunningWithRootlessKit returns true if running under RootlessKit namespaces.
func RunningWithRootlessKit() bool {
runningWithRootlessKitOnce.Do(func() {
u := os.Getenv("ROOTLESSKIT_STATE_DIR")
runningWithRootlessKit = u != ""
})
return runningWithRootlessKit
}
// GetRootlessKitClient returns RootlessKit client
func GetRootlessKitClient() (client.Client, error) {
stateDir := os.Getenv("ROOTLESSKIT_STATE_DIR")
if stateDir == "" {
return nil, errors.New("environment variable `ROOTLESSKIT_STATE_DIR` is not set")
}
apiSock := filepath.Join(stateDir, "api.sock")
return client.New(apiSock)
}
| AkihiroSuda | 301eba7fa4871862fbc00753c4accfc535e0eedf | 6b9b445af6c7908992632cff6c30cbf6a4c617ac | This feels like we're making an impromptu API client; could we have a basic API client in the rootlesskit repository that we can vendor and consume? | thaJeztah | 4,796 |
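The vendored client the comment above asks about is exactly what the updated `rootless.go` uses (`pkg/api/client` from the rootlesskit repository). As a hypothetical illustration, not code from the PR, this is roughly what a caller combining the new `GetRootlessKitClient` helper with that client looks like; the field names follow the daemon code quoted earlier in this document.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/rootless"
)

// rootlessKitComponent builds a component entry like the one the daemon
// reports, using the vendored RootlessKit API client under the hood.
func rootlessKitComponent(ctx context.Context) (*types.ComponentVersion, error) {
	rlc, err := rootless.GetRootlessKitClient()
	if err != nil {
		return nil, err
	}
	info, err := rlc.Info(ctx)
	if err != nil {
		return nil, err
	}
	return &types.ComponentVersion{
		Name:    "rootlesskit",
		Version: info.Version,
		Details: map[string]string{
			"ApiVersion":    info.APIVersion,
			"StateDir":      info.StateDir,
			"NetworkDriver": info.NetworkDriver.Driver,
			"PortDriver":    info.PortDriver.Driver,
		},
	}, nil
}

func main() {
	c, err := rootlessKitComponent(context.Background())
	if err != nil {
		fmt.Println("rootlesskit not available:", err)
		return
	}
	fmt.Printf("%s %s %v\n", c.Name, c.Version, c.Details)
}
```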
| AkihiroSuda | 301eba7fa4871862fbc00753c4accfc535e0eedf | 6b9b445af6c7908992632cff6c30cbf6a4c617ac | I realize we may not do this currently for the other binaries, but the `/info` endpoint is already heavy on doing a _lot_ of things; would it make sense to use a `sync.Once`?
Or are there situations where the rootlesskit binary can be updated _without_ the daemon itself being restarted? In that case we _could_ use a "dirty" trick to reset the `sync.Once` on SIGHUP, for example; I did that for a test in the CLI:
https://github.com/docker/cli/blob/a32cd16160f1b41c1c4ae7bee4dac929d1484e59/cli/config/config.go#L27
```go
var (
initConfigDir = new(sync.Once)
```
https://github.com/docker/cli/blob/a32cd16160f1b41c1c4ae7bee4dac929d1484e59/cli/config/config.go#L45-L50
```go
// resetConfigDir is used in testing to reset the "configDir" package variable
// and its sync.Once to force re-lookup between tests.
func resetConfigDir() {
configDir = ""
initConfigDir = new(sync.Once)
}
``` | thaJeztah | 4,797 |
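Not from the PR: a hedged sketch of the "reset the `sync.Once` on SIGHUP" idea discussed above, applied to an expensive `--version` lookup. All names (`versionOnce`, `slirp4netnsVersion`, `watchSIGHUP`) are invented for illustration; the merged code does not currently cache or reset anything like this.

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"os/signal"
	"strings"
	"sync"
	"syscall"
)

var (
	mu            sync.Mutex
	versionOnce   = new(sync.Once)
	cachedVersion string
)

// slirp4netnsVersion runs `slirp4netns --version` at most once per "epoch";
// the cached value is reused until the Once is swapped out.
func slirp4netnsVersion() string {
	mu.Lock()
	once := versionOnce
	mu.Unlock()

	once.Do(func() {
		out, err := exec.Command("slirp4netns", "--version").Output()
		mu.Lock()
		if err == nil {
			cachedVersion = strings.TrimSpace(string(out))
		} else {
			cachedVersion = ""
		}
		mu.Unlock()
	})

	mu.Lock()
	defer mu.Unlock()
	return cachedVersion
}

// watchSIGHUP swaps in a fresh sync.Once when the process is asked to reload,
// so the next caller re-runs the lookup (for example after the binary was upgraded).
func watchSIGHUP() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	go func() {
		for range ch {
			mu.Lock()
			versionOnce = new(sync.Once)
			mu.Unlock()
		}
	}()
}

func main() {
	watchSIGHUP()
	fmt.Println(slirp4netnsVersion())
}
```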