Merge branch 'master' into registry-mirror

Tejal Desai 2020-01-29 11:42:06 -08:00 committed by GitHub
commit 47ab3fe997
1221 changed files with 110162 additions and 38765 deletions


@@ -1,10 +1,21 @@
language: go
os: linux
dist: bionic
env:
- IMAGE_REPO=localhost:5000
go:
- "1.13.3"
go_import_path: github.com/GoogleContainerTools/kaniko
before_install:
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- sudo apt-get update
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
- curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && sudo mv container-diff-linux-amd64 /usr/local/bin/container-diff
- docker run -d -p 5000:5000 --restart always --name registry registry:2
- ./integration/replace-gcr-with-local-registry.sh integration/dockerfiles
script:
- make test
- ./integration-test.sh --uploadToGCS=false
- make images
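To reproduce this CI setup locally, a rough sketch (assuming Docker is installed and that integration-test.sh picks up IMAGE_REPO from the environment, as the env block above implies):

  # start a throwaway local registry, as the CI job does
  docker run -d -p 5000:5000 --restart always --name registry registry:2
  # rewrite the integration Dockerfiles to pull from the local registry instead of gcr.io
  ./integration/replace-gcr-with-local-registry.sh integration/dockerfiles
  # run the suite against the local registry without uploading results to GCS
  IMAGE_REPO=localhost:5000 ./integration-test.sh --uploadToGCS=false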


@@ -18,10 +18,11 @@ VERSION_MINOR ?= 16
VERSION_BUILD ?= 0
VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION_PACKAGE = $(REPOPATH)/pkg/version
SHELL := /bin/bash
GOOS ?= $(shell go env GOOS)
GOARCH = amd64
GOARCH ?= $(shell go env GOARCH)
ORG := github.com/GoogleContainerTools
PROJECT := kaniko
REGISTRY?=gcr.io/kaniko-project
@@ -38,7 +39,7 @@ GO_LDFLAGS += '
EXECUTOR_PACKAGE = $(REPOPATH)/cmd/executor
WARMER_PACKAGE = $(REPOPATH)/cmd/warmer
KANIKO_PROJECT = $(REPOPATH)/kaniko
BUILD_ARG ?=
# Force using Go Modules and always read the dependencies from
# the `vendor` folder.
@@ -62,9 +63,9 @@ integration-test:
.PHONY: images
images:
docker build ${BUILD_ARG} -t $(REGISTRY)/executor:latest -f deploy/Dockerfile .
docker build ${BUILD_ARG} -t $(REGISTRY)/executor:debug -f deploy/Dockerfile_debug .
docker build ${BUILD_ARG} -t $(REGISTRY)/warmer:latest -f deploy/Dockerfile_warmer .
docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/executor:latest -f deploy/Dockerfile .
docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/executor:debug -f deploy/Dockerfile_debug .
docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/warmer:latest -f deploy/Dockerfile_warmer .
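With GOARCH forwarded as a build arg, the image targets can be cross-built straight from the make command line; a sketch, assuming the golang builder stage can cross-compile for the requested architecture:

  # defaults to the host architecture via `go env GOARCH`
  make images
  # build the executor, debug, and warmer images for arm64 instead
  make images GOARCH=arm64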
.PHONY: push
push:


@@ -68,6 +68,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME
- [--target](#--target)
- [--tarPath](#--tarpath)
- [--verbosity](#--verbosity)
- [--whitelist-var-run](#--whitelist-var-run)
- [Debug Image](#debug-image)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)
@@ -517,6 +518,10 @@ You need to set `--destination` as well (for example `--destination=image`).
Set this flag as `--verbosity=<panic|fatal|error|warn|info|debug>` to set the logging level. Defaults to `info`.
#### --whitelist-var-run
Ignore /var/run when taking the image snapshot. Set this flag to false to preserve /var/run/* in the destination image. (Default: true.)
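An illustrative invocation (the context path, destination, and volume mount are placeholders, not taken from this change):

  # keep /var/run/* in the pushed image instead of dropping it
  docker run -v "$PWD":/workspace gcr.io/kaniko-project/executor:latest \
    --context=/workspace \
    --destination=<registry>/<repo>:<tag> \
    --whitelist-var-run=false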
### Debug Image
The kaniko executor image is based on scratch and doesn't contain a shell.


@@ -73,6 +73,8 @@ var RootCmd = &cobra.Command{
if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" {
return errors.New("You must provide --destination if setting ImageNameDigestFile")
}
// Update whitelisted paths
util.UpdateWhitelist(opts.WhitelistVarRun)
}
return nil
},
@@ -145,6 +147,7 @@ func addKanikoOptionsFlags() {
RootCmd.PersistentFlags().VarP(&opts.InsecureRegistries, "insecure-registry", "", "Insecure registry using plain HTTP to push and pull. Set it repeatedly for multiple registries.")
RootCmd.PersistentFlags().VarP(&opts.SkipTLSVerifyRegistries, "skip-tls-verify-registry", "", "Insecure registry ignoring TLS verify to push and pull. Set it repeatedly for multiple registries.")
RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use has pull-through cache instead of docker.io.")
RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore the /var/run directory when taking the image snapshot. Set it to false to preserve /var/run/* in the destination image. (Default true).")
}
// addHiddenFlags marks certain flags as hidden from the executor help text
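As a usage sketch for the new --registry-mirror flag (mirror.gcr.io stands in for whatever pull-through cache is actually available; it is not configured by this change):

  # resolve base images through a mirror instead of going straight to docker.io
  /kaniko/executor \
    --dockerfile=Dockerfile \
    --destination=<registry>/<repo>:<tag> \
    --registry-mirror=mirror.gcr.io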


@@ -15,6 +15,7 @@
# Builds the static Go image to execute in a Kubernetes job
FROM golang:1.12
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
@@ -28,7 +29,7 @@ ADD https://aadacr.blob.core.windows.net/acr-docker-credential-helper/docker-cre
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-acr-linux-amd64.tar.gz
COPY . .
RUN make
RUN make GOARCH=${GOARCH}
FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor
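The same ARG can be passed to a manual build of this Dockerfile; a sketch (the tag is chosen only for illustration):

  # cross-build the executor image; GOARCH falls back to amd64 when not supplied
  docker build --build-arg GOARCH=arm64 -t executor:arm64 -f deploy/Dockerfile .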


@@ -16,6 +16,7 @@
# Stage 0: Build the executor binary and get credential helpers
FROM golang:1.12
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
@@ -25,7 +26,7 @@ RUN docker-credential-gcr configure-docker
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
COPY . .
RUN make && make out/warmer
RUN make GOARCH=${GOARCH} && make out/warmer
# Stage 1: Get the busybox shell
FROM gcr.io/cloud-builders/bazel:latest


@@ -15,6 +15,7 @@
# Builds the static Go image to execute in a Kubernetes job
FROM golang:1.12
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
@@ -25,7 +26,7 @@ RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/dock
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
COPY . .
RUN make out/warmer
RUN make GOARCH=${GOARCH} out/warmer
FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/warmer /kaniko/warmer


@@ -16,5 +16,4 @@ steps:
"-t", "gcr.io/kaniko-project/warmer:${COMMIT_SHA}", "."]
images: ["gcr.io/kaniko-project/executor:${COMMIT_SHA}",
"gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}",
"gcr.io/kaniko-project/executor:debug",
"gcr.io/kaniko-project/warmer:${COMMIT_SHA}"]

go.mod

@@ -2,113 +2,70 @@ module github.com/GoogleContainerTools/kaniko
go 1.13
replace (
github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf => github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf
github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c => github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c
github.com/tonistiigi/fsutil v0.0.0-20190819224149-3d2716dd0a4d => github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1
)
require (
cloud.google.com/go v0.25.0
cloud.google.com/go v0.26.0
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible // indirect
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest v10.15.0+incompatible // indirect
github.com/Microsoft/go-winio v0.4.9 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/aws/aws-sdk-go v1.25.19
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/containerd/containerd v1.1.2 // indirect
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 // indirect
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 // indirect
github.com/coreos/etcd v3.3.9+incompatible // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible // indirect
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
github.com/docker/go-units v0.3.3 // indirect
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 // indirect
github.com/emirpasic/gods v1.9.0 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fsnotify/fsnotify v1.4.7 // indirect
github.com/genuinetools/amicontained v0.4.3
github.com/ghodss/yaml v1.0.0 // indirect
github.com/gliderlabs/ssh v0.2.2 // indirect
github.com/gogo/protobuf v1.1.1 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/protobuf v1.1.0 // indirect
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a // indirect
github.com/google/go-cmp v0.2.0
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111
github.com/golang/mock v1.3.1
github.com/google/go-cmp v0.3.0
github.com/google/go-containerregistry v0.0.0-20191218175032-34fb8ff33bed
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
github.com/google/martian v2.1.0+incompatible // indirect
github.com/google/uuid v1.0.0 // indirect
github.com/googleapis/gax-go v2.0.0+incompatible // indirect
github.com/googleapis/gnostic v0.2.0 // indirect
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be // indirect
github.com/karrick/godirwalk v1.7.7
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e // indirect
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
github.com/mattn/go-shellwords v1.0.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/minio/highwayhash v1.0.0
github.com/mitchellh/go-homedir v1.0.0 // indirect
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v1.0.0-rc5 // indirect
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c
github.com/opencontainers/runtime-spec v1.0.1 // indirect
github.com/opencontainers/selinux v1.0.0-rc1 // indirect
github.com/opentracing/opentracing-go v1.0.2 // indirect
github.com/otiai10/copy v1.0.2
github.com/pborman/uuid v1.2.0 // indirect
github.com/pelletier/go-buffruneio v0.2.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd // indirect
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 // indirect
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/spf13/afero v1.2.1
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.1
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5
github.com/src-d/gcfg v1.3.0 // indirect
github.com/stretchr/testify v1.4.0 // indirect
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e // indirect
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a // indirect
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 // indirect
github.com/vbatts/tar-split v0.10.2 // indirect
github.com/xanzy/ssh-agent v0.2.0 // indirect
go.opencensus.io v0.14.0 // indirect
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
golang.org/x/sync v0.0.0-20190423024810-112230192c58
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 // indirect
google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 // indirect
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/src-d/go-billy.v4 v4.2.0 // indirect
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 // indirect
gopkg.in/src-d/go-git.v4 v4.6.0
gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.0.0-20180711052118-183f3326a935 // indirect
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d // indirect
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 // indirect
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a // indirect
k8s.io/kubernetes v1.11.1 // indirect
)

go.sum

@@ -1,62 +1,97 @@
cloud.google.com/go v0.25.0 h1:6vD6xZTc8Jo6To8gHxFDRVsMvWFDgY3rugNszcDalN8=
cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible h1:ysqLW+tqZjJWOTE74heH/pDRbr4vlN3yV+dqQYgpyxw=
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v19.1.1+incompatible h1:0nNLU6QNN8FGd3FCQa2e8LAtB3THCJ24aOZ4KbA4Jtk=
github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.15.0+incompatible h1:GqDO/9r+7tmkU8HI/DNLVkeucncU8jCul1DLeTaA3GI=
github.com/Azure/go-autorest v10.15.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Azure/go-autorest v10.15.5+incompatible h1:vdxx6wM1rVkKt/3niByPVjguoLWkWImOcJNvEykgBzY=
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/hcsshim v0.8.5/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc=
github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/containerd/containerd v1.1.2 h1:gojrlFOL/4A5zP4BPgK3YrCaVIgAIHZfqGco/+mfJOs=
github.com/containerd/containerd v1.1.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 h1:T5LkgEMACPq7+VPRnkh51WhyV+Q0waOGGtp37Y82org=
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/coreos/etcd v3.3.9+incompatible h1:iKSVPXGNGqroBx4+RmUXv8emeU7y+ucRZSzTYgzLZwM=
github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf h1:juYsQtzJOxIaX5TW/3IU7qlvYU/nALKULBOEdiydFd0=
github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.2.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/go-cni v0.0.0-20190813230227-49fbd9b210f3/go.mod h1:2wlRxCQdiBY+OcjNg5x8kI+5mEL1fGt25L4IzQHYJsM=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible h1:8hsJ081BmnqE+fkFjgasY9S8+otMhlWxHcKSYcKzrpk=
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197 h1:7X3lPJrEEhoUt1UnISqyUB4phKf9aAKVMdFXD63DJO8=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v0.0.0-20190321234815-f40f9c240ab0/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c h1:1Pev8v0EhB6Fbu9FHCLzZD74gJdJk+QVmlbezI6OToM=
github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.3.1/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20190604151032-3c26b4e7495e/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 h1:ASmFyV8Sc6XrH1ng0TBPpYLspA8b7qRT84IbrQY1jSY=
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
@@ -70,74 +105,119 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a h1:ZJu5NB1Bk5ms4vw0Xu4i+jD32SE9jQXyfnOvwhHqlT0=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111 h1:5F39eE4QsUnAd6iGzt1/zBs3dhX877U2hJyOJHFmQF0=
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-containerregistry v0.0.0-20191218175032-34fb8ff33bed h1:0AwV9UBwwKPKrfpTYLOKr8ymevanUxrsSEkAo0uU9aA=
github.com/google/go-containerregistry v0.0.0-20191218175032-34fb8ff33bed/go.mod h1:rodaC7jYStJ2mjR8Y+5a/jCzcRPFRH74KmqSnJC88co=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.2 h1:DcFegQ7+ECdmkJMfVwWlC+89I4esJ7p8nkGt9ainGDk=
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8=
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20180213033435-07191f837fed/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/karrick/godirwalk v1.7.7 h1:lLkPCA+C0u1pI4fLFseaupvh5/THlPJIqSPmnGGViKs=
github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
@@ -146,50 +226,72 @@ github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+nt
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/minio/highwayhash v1.0.0 h1:iMSDhgUILCr0TNm8LWlSjF8N0ZIj2qbO8WHp6Q/J2BA=
github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75 h1:xlqeydzXCHEsRtsDyT0rvOBF7t3iTxNSG8gTQk2rfms=
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75/go.mod h1:nnELdKPRkUAQR6pAB3mRU3+IlbqL3SSaAWqQL8k/K+4=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c h1:QsHXQZ/5EQsj9IIXVVmTWaeEM0VAhZIrEMJ7XKB1Qu0=
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c/go.mod h1:UjmFBX/gZo+/+B1YEiH9Or4s7wBgwvOHhidh3rOtCXw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.0.0-rc5 h1:rYjdzMDXVly2Av0RLs3nf/iVkaWh2UrDhuTdTT2KggQ=
github.com/opencontainers/runc v1.0.0-rc5/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc6/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190621203724-f4982d86f7fd h1:w9DJ/JL7fK4VjMoGo4e9gsq2xRhZThNI4PFuAwN8dJ0=
github.com/opencontainers/runc v1.0.0-rc8.0.20190621203724-f4982d86f7fd/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.0.0-20180909173843-eba862dc2470/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSdStKodlokEPs=
github.com/opencontainers/selinux v1.0.0-rc1/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1 h1:A7kMXwDPBTfIVRv2l6XV3U6Su3SzLUzZjxnDDQVZDIY=
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1/go.mod h1:pXzZSDlN+HPzSdyIBnKNN9ptD9Hx7iZMWIJPTwo4FPE=
github.com/otiai10/copy v1.0.2 h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc=
github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/mint v1.3.0 h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -199,70 +301,156 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e h1:QjF5rxNgRSLHJDwKUvfYP3qOx1vTDzUi/+oSC8FXnCI=
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a h1:qd/ItJwqOuirA+l39OBtUOLokgzKyqKsHMTdzHpoWFk=
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a/go.mod h1:eden9dLzAAuNQ0L7whFr6/Mzgz6btsvQpUnxOOI+CCE=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 h1:WRlNtJ2whFMKo95/e6uaNuAnn5TxLcMzczqMcfbIDxo=
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1/go.mod h1:hP47OZfgT1aNVDJj28EnEKaKg6mjPEoS5Tb4BsWCTPs=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v1.2.1/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ=
github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.14.0 h1:1eTLxqxSIAylcKoxnNkdhvvBNZDA8JwkKNXxgyma0IA=
go.opencensus.io v0.14.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc h1:3ElrZeO6IBP+M8kgu5YFwRo92Gqr+zBg3aooYQ6ziqU=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191205215504-7b8c8591a921/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 h1:wCy2/9bhO1JeP2zZUALrj7ZdZuZoR4mRV57kTxjqRpo=
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 h1:2PjFmwzH/sxgW9CRJDlEiwMHO8rOk1eMDzVL14HC1e4=
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 h1:vTe0EiECbrLSj5+6SXMi+eWwYW/bjmd26LE5lv52YVs=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/src-d/go-billy.v4 v4.2.0 h1:VGbrP1EsYxtvVPEiHui+4//imr4E5MGEFLx66bQtusg=
@ -271,25 +459,40 @@ gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOA
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.6.0 h1:3XrA9Qxiwfj7Iusd7dVYUqxMjJYPsLuBdUeQbwnL/NQ=
gopkg.in/src-d/go-git.v4 v4.6.0/go.mod h1:CzbUWqMn4pvmvndg3gnh5iZFmSsbhyhUWdI0IQ60AQo=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
k8s.io/api v0.0.0-20180711052118-183f3326a935 h1:K2hsi4kfw1BvCEKX5J1A51pQtXjico7lOEmSiIHQ3/E=
k8s.io/api v0.0.0-20180711052118-183f3326a935/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4=
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.0.0-20180904230853-4e7be11eab3f h1:DLRkv8Ps4Sdx8Srj+UtGisj4whV7v/HezlHx6QqiZqE=
k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512 h1:/Z1m/6oEN6hE2SzWP4BHW2yATeUrBRr+1GxNf1Ny58Y=
k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 h1:4DIWGqvAjLME47asVwjb14H+6bDRu+4h43Ssw6tMAHc=
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.0.0-20191121015212-c4c8f8345c7e/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.11.1 h1:wHOPX+teuYaSlUWfL/b24jMH0n7HECbj4Xt8i7kSZIw=
k8s.io/kubernetes v1.11.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/kubernetes v1.11.10 h1:wCo67+wmguioiYv0ipIiTaXbVPfFBBjOTgIngeGGG+A=
k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@ -1,4 +1,4 @@
FROM marketplace.gcr.io/google/ubuntu1804@sha256:4649ae6b381090fba6db38137eb05e03f44bf43c40149f734241c9f96aa0e001
FROM ubuntu:18.04
ENV dir /tmp/dir/
ONBUILD RUN echo "onbuild" > /tmp/onbuild
ONBUILD RUN mkdir $dir

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
# First, try adding some regular files
ADD context/foo foo
ADD context/foo /foodir/

View File

@ -16,7 +16,7 @@
# If the image is built twice, /date should be the same in both images
# if the cache is implemented correctly
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
RUN date > /date
COPY context/foo /foo
RUN echo hey

View File

@ -16,7 +16,7 @@
# /date should be the same regardless of when this image is built
# if the cache is implemented correctly
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
WORKDIR /foo
RUN apt-get update && apt-get install -y make
COPY context/bar /context

View File

@ -1,5 +1,4 @@
FROM gcr.io/distroless/base@sha256:628939ac8bf3f49571d05c6c76b8688cb4a851af6c7088e599388259875bde20 AS first
FROM debian:10.2 AS first
CMD ["mycmd"]
FROM first
ENTRYPOINT ["myentrypoint"] # This should clear out CMD in the config metadata

View File

@ -0,0 +1,6 @@
FROM busybox as t
RUN echo "hello" > /tmp/target
RUN ln -s /tmp/target /tmp/link
FROM scratch
COPY --from=t /tmp/link /tmp

View File

@ -0,0 +1,5 @@
FROM alpine
RUN mkdir -p /some/dir/ && echo 'first' > /some/dir/first.txt
RUN rm /some/dir/first.txt

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
ENV hey hey
ENV PATH /usr/local
ENV testmultipleeq="this=is a=test string=with a=lot of=equals"

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
EXPOSE 80
EXPOSE 81/udp
ENV protocol tcp

View File

@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11

View File

@ -0,0 +1,13 @@
FROM alpine as base_stage
RUN echo base_stage
FROM base_stage as BUG_stage
RUN echo BUG_stage
FROM BUG_stage as final_stage
RUN echo final_stage

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
LABEL foo=bar
LABEL "baz"="bat"
ENV label1 "mylabel"

View File

@ -1,15 +1,16 @@
ARG REGISTRY=gcr.io
ARG REPO=google-appengine
ARG REGISTRY=docker.io
ARG IMAGE=debian
ARG TAG=9.11
ARG WORD=hello
ARG W0RD2=hey
FROM ${REGISTRY}/${REPO}/debian9 as stage1
FROM ${REGISTRY}/${IMAGE}:${TAG} as stage1
# Should evaluate WORD and create /tmp/hello
ARG WORD
RUN touch /${WORD}
FROM ${REGISTRY}/${REPO}/debian9
FROM ${REGISTRY}/${IMAGE}:${TAG}
COPY --from=stage1 /hello /tmp

View File

@ -1,4 +1,4 @@
FROM gcr.io/distroless/base@sha256:628939ac8bf3f49571d05c6c76b8688cb4a851af6c7088e599388259875bde20
FROM debian:10.2
CMD ["command", "one"]
CMD ["command", "two"]
CMD echo "hello"

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:f0159d14385afcb58a9b2fa8955c0cb64bd3abc365e8589f8c2dd38150fbfdbe as base
FROM debian:9.11 as base
COPY . .
FROM scratch as second
@ -20,4 +20,4 @@ FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5a
FROM fourth
ARG file
COPY --from=second /foo ${file}
COPY --from=gcr.io/google-appengine/debian9@sha256:00109fa40230a081f5ecffe0e814725042ff62a03e2d1eae0563f1f82eaeae9b /etc/os-release /new
COPY --from=debian:9.11 /etc/os-release /new

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
RUN echo "hey" > /etc/foo
RUN echo "baz" > /etc/baz
RUN cp /etc/baz /etc/bar

View File

@ -15,6 +15,6 @@
# Test to make sure the executor builds an image correctly
# when no files are changed
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
RUN echo "hey"
MAINTAINER kaniko

View File

@ -12,6 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
USER testuser:testgroup

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
RUN useradd testuser
RUN groupadd testgroup
USER testuser:testgroup

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
RUN mkdir /foo
RUN echo "hello" > /foo/hey
VOLUME /foo/bar /tmp /qux/quux

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
FROM debian:9.11
VOLUME /foo1
RUN echo "hello" > /foo1/hello
WORKDIR /foo1/bar

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9@sha256:6b3aa04751aa2ac3b0c7be4ee71148b66d693ad212ce6d3244bd2a2a147f314a
FROM debian:9.11
COPY context/foo foo
WORKDIR /test
# Test that this will be appended on to the previous command, to create /test/workdir

View File

@ -220,8 +220,8 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke
additionalFlags = append(buildArgs, additionalKanikoFlagsMap[dockerfile]...)
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
fmt.Printf("Going to build image with kaniko: %s, flags: %s \n", kanikoImage, additionalFlags)
dockerRunFlags := []string{
"run", "-e", benchmarkEnv,
dockerRunFlags := []string{"run", "--net=host",
"-e", benchmarkEnv,
"-v", cwd + ":/workspace",
"-v", benchmarkDir + ":/kaniko/benchmarks",
}
@ -251,7 +251,7 @@ func populateVolumeCache() error {
_, ex, _, _ := runtime.Caller(0)
cwd := filepath.Dir(ex)
warmerCmd := exec.Command("docker",
append([]string{"run",
append([]string{"run", "--net=host",
"-d",
"-v", os.Getenv("HOME") + "/.config/gcloud:/root/.config/gcloud",
"-v", cwd + ":/workspace",
@ -284,7 +284,7 @@ func (d *DockerFileBuilder) buildCachedImages(config *gcpConfig, cacheRepo, dock
}
kanikoImage := GetVersionedKanikoImage(imageRepo, dockerfile, version)
dockerRunFlags := []string{"run",
dockerRunFlags := []string{"run", "--net=host",
"-v", cwd + ":/workspace",
"-e", benchmarkEnv}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount)
@ -315,7 +315,7 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi
buildContextPath := "./relative-subdirectory"
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
dockerRunFlags := []string{"run", "-v", cwd + ":/workspace"}
dockerRunFlags := []string{"run", "--net=host", "-v", cwd + ":/workspace"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,

View File

@ -89,26 +89,28 @@ func TestMain(m *testing.M) {
}
config = initGCPConfig()
contextFile, err := CreateIntegrationTarball()
if err != nil {
fmt.Println("Failed to create tarball of integration files for build context", err)
os.Exit(1)
}
if config.uploadToGCS {
contextFile, err := CreateIntegrationTarball()
if err != nil {
fmt.Println("Failed to create tarball of integration files for build context", err)
os.Exit(1)
}
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
if err != nil {
fmt.Println("Failed to upload build context", err)
os.Exit(1)
}
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
if err != nil {
fmt.Println("Failed to upload build context", err)
os.Exit(1)
}
err = os.Remove(contextFile)
if err != nil {
fmt.Printf("Failed to remove tarball at %s: %s\n", contextFile, err)
os.Exit(1)
}
err = os.Remove(contextFile)
if err != nil {
fmt.Printf("Failed to remove tarball at %s: %s\n", contextFile, err)
os.Exit(1)
}
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
defer DeleteFromBucket(fileInBucket)
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
defer DeleteFromBucket(fileInBucket)
}
setupCommands := []struct {
name string
@ -198,53 +200,26 @@ func TestRun(t *testing.T) {
}
}
func TestGitBuildcontext(t *testing.T) {
repo := "github.com/GoogleContainerTools/kaniko"
dockerfile := dockerfileTestRun
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
func getGitRepo() string {
var branch, repoSlug string
if _, ok := os.LookupEnv("TRAVIS"); ok {
if os.Getenv("TRAVIS_PULL_REQUEST") != "false" {
branch = os.Getenv("TRAVIS_PULL_REQUEST_BRANCH")
repoSlug = os.Getenv("TRAVIS_PULL_REQUEST_SLUG")
log.Printf("Travis CI Pull request source repo: %s branch: %s\n", repoSlug, branch)
} else {
branch = os.Getenv("TRAVIS_BRANCH")
repoSlug = os.Getenv("TRAVIS_REPO_SLUG")
log.Printf("Travis CI pepo: %s branch: %s\n", repoSlug, branch)
}
return "github.com/" + repoSlug + "#refs/heads/" + branch
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
dockerRunFlags := []string{"run"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"-c", fmt.Sprintf("git://%s", repo))
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command \"%s\": %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
// container-diff
daemonDockerImage := daemonPrefix + dockerImage
containerdiffCmd := exec.Command("container-diff", "diff", "--no-cache",
daemonDockerImage, kanikoImage,
"-q", "--type=file", "--type=metadata", "--json")
diff := RunCommand(containerdiffCmd, t)
t.Logf("diff = %s", string(diff))
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
return "github.com/GoogleContainerTools/kaniko"
}
func TestGitBuildContextWithBranch(t *testing.T) {
repo := "github.com/GoogleContainerTools/kaniko#refs/tags/v0.10.0"
dockerfile := dockerfileTestRun
func TestGitBuildcontext(t *testing.T) {
repo := getGitRepo()
dockerfile := "integration/dockerfiles/Dockerfile_test_run_2"
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
@ -260,7 +235,7 @@ func TestGitBuildContextWithBranch(t *testing.T) {
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
dockerRunFlags := []string{"run"}
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
@ -614,6 +589,7 @@ type gcpConfig struct {
hardlinkBaseImage string
serviceAccount string
dockerMajorVersion int
uploadToGCS bool
}
type imageDetails struct {
@ -631,6 +607,7 @@ func initGCPConfig() *gcpConfig {
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.")
flag.BoolVar(&c.uploadToGCS, "uploadToGCS", true, "Upload the tar-ed contents of `integration` dir to GCS bucket. Default is true. Set this to false to prevent uploading.")
flag.Parse()
if len(c.serviceAccount) > 0 {

View File

@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is needed due to the following bug:
# https://github.com/GoogleContainerTools/kaniko/issues/966
if [ "$#" -ne 1 ]; then
echo "Please specify path to dockerfiles as first argument."
echo "Usage: `basename $0` integration/dockerfiles"
exit 2
fi
dir_with_docker_files=$1
for dockerfile in $dir_with_docker_files/*; do
cat $dockerfile | grep '^FROM' | grep "gcr" | while read -r line; do
gcr_repo=$(echo "$line" | awk '{ print $2 }')
local_repo=$(echo "$gcr_repo" | sed -e "s/^.*gcr.io\(\/.*\)$/localhost:5000\1/")
remove_digest=$(echo "$local_repo" | cut -f1 -d"@")
echo "Running docker pull $gcr_repo"
docker pull "$gcr_repo"
echo "Running docker tag $gcr_repo $remove_digest"
docker tag "$gcr_repo" "$remove_digest"
echo "Running docker push $remove_digest"
docker push "$remove_digest"
echo "Updating dockerfile $dockerfile to use local repo $local_repo"
sed -i -e "s/^\(FROM \).*gcr.io\(.*\)$/\1localhost:5000\2/" $dockerfile
done
done

View File

@ -20,13 +20,13 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
v1 "github.com/google/go-containerregistry/pkg/v1"
@ -59,11 +59,15 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
if err != nil {
return err
}
if fi.IsDir() && !strings.HasSuffix(fullPath, string(os.PathSeparator)) {
fullPath += "/"
}
cwd := config.WorkingDir
if cwd == "" {
cwd = constants.RootDir
}
destPath, err := util.DestinationFilepath(src, dest, cwd)
destPath, err := util.DestinationFilepath(fullPath, dest, cwd)
if err != nil {
return err
}
@ -76,16 +80,12 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
}
if fi.IsDir() {
if !filepath.IsAbs(dest) {
// we need to add '/' to the end to indicate the destination is a directory
dest = filepath.Join(cwd, dest) + "/"
}
copiedFiles, err := util.CopyDir(fullPath, dest, c.buildcontext)
copiedFiles, err := util.CopyDir(fullPath, destPath, c.buildcontext)
if err != nil {
return err
}
c.snapshotFiles = append(c.snapshotFiles, copiedFiles...)
} else if fi.Mode()&os.ModeSymlink != 0 {
} else if util.IsSymlink(fi) {
// If file is a symlink, we want to create the same relative symlink
exclude, err := util.CopySymlink(fullPath, destPath, c.buildcontext)
if err != nil {
@ -167,9 +167,15 @@ func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *docke
if cr.img == nil {
return errors.New(fmt.Sprintf("cached command image is nil %v", cr.String()))
}
cr.extractedFiles, err = util.GetFSFromImage(RootDir, cr.img, cr.extractFn)
logrus.Infof("extractedFiles: %s", cr.extractedFiles)
layers, err := cr.img.Layers()
if err != nil {
return errors.Wrapf(err, "retrieve image layers")
}
cr.extractedFiles, err = util.GetFSFromLayers(RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout())
logrus.Debugf("extractedFiles: %s", cr.extractedFiles)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}
@ -182,7 +188,10 @@ func (cr *CachingCopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs
}
func (cr *CachingCopyCommand) FilesToSnapshot() []string {
return cr.extractedFiles
f := cr.extractedFiles
logrus.Debugf("files extracted by caching copy command %s", f)
return f
}
func (cr *CachingCopyCommand) String() string {
@ -197,21 +206,42 @@ func (cr *CachingCopyCommand) From() string {
}
func resolveIfSymlink(destPath string) (string, error) {
baseDir := filepath.Dir(destPath)
if info, err := os.Lstat(baseDir); err == nil {
switch mode := info.Mode(); {
case mode&os.ModeSymlink != 0:
linkPath, err := os.Readlink(baseDir)
if err != nil {
return "", errors.Wrap(err, "error reading symlink")
}
absLinkPath := filepath.Join(filepath.Dir(baseDir), linkPath)
newPath := filepath.Join(absLinkPath, filepath.Base(destPath))
logrus.Tracef("Updating destination path from %v to %v due to symlink", destPath, newPath)
return newPath, nil
}
if !filepath.IsAbs(destPath) {
return "", errors.New("dest path must be abs")
}
return destPath, nil
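// Walk up from destPath until an ancestor that actually exists is found,
// remembering the trailing components that do not exist yet.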
var nonexistentPaths []string
newPath := destPath
for newPath != "/" {
_, err := os.Lstat(newPath)
if err != nil {
if os.IsNotExist(err) {
dir, file := filepath.Split(newPath)
newPath = filepath.Clean(dir)
nonexistentPaths = append(nonexistentPaths, file)
continue
} else {
return "", errors.Wrap(err, "failed to lstat")
}
}
newPath, err = filepath.EvalSymlinks(newPath)
if err != nil {
return "", errors.Wrap(err, "failed to eval symlinks")
}
break
}
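// Re-append the nonexistent components onto the resolved ancestor, deepest last.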
for i := len(nonexistentPaths) - 1; i >= 0; i-- {
newPath = filepath.Join(newPath, nonexistentPaths[i])
}
if destPath != newPath {
logrus.Tracef("Updating destination path from %v to %v due to symlink", destPath, newPath)
}
return filepath.Clean(newPath), nil
}
func copyCmdFilesUsedFromContext(

20
pkg/commands/copy_test.go Normal file → Executable file
View File

@ -194,14 +194,28 @@ func Test_resolveIfSymlink(t *testing.T) {
if err != nil {
t.Error(err)
}
cases := []testCase{{destPath: thepath, expectedPath: thepath, err: nil}}
cases := []testCase{
{destPath: thepath, expectedPath: thepath, err: nil},
{destPath: "/", expectedPath: "/", err: nil},
}
baseDir = tmpDir
symLink := filepath.Join(baseDir, "symlink")
if err := os.Symlink(filepath.Base(thepath), symLink); err != nil {
t.Error(err)
}
cases = append(cases, testCase{filepath.Join(symLink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil})
cases = append(cases,
testCase{filepath.Join(symLink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil},
testCase{filepath.Join(symLink, "inner", "foo.txt"), filepath.Join(thepath, "inner", "foo.txt"), nil},
)
absSymlink := filepath.Join(tmpDir, "abs-symlink")
if err := os.Symlink(thepath, absSymlink); err != nil {
t.Error(err)
}
cases = append(cases,
testCase{filepath.Join(absSymlink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil},
testCase{filepath.Join(absSymlink, "inner", "foo.txt"), filepath.Join(thepath, "inner", "foo.txt"), nil},
)
for i, c := range cases {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {

View File

@ -68,13 +68,12 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
cmd.Env = addDefaultHOME(config.User, replacementEnvs)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
var userStr string
// If specified, run the command as a specific user
if config.User != "" {
userAndGroup := strings.Split(config.User, ":")
userStr := userAndGroup[0]
userStr = userAndGroup[0]
var groupStr string
if len(userAndGroup) > 1 {
groupStr = userAndGroup[1]
@ -101,6 +100,7 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
}
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}
cmd.Env = addDefaultHOME(userStr, replacementEnvs)
if err := cmd.Start(); err != nil {
return errors.Wrap(err, "starting command")
@ -197,7 +197,18 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker
if cr.img == nil {
return errors.New(fmt.Sprintf("command image is nil %v", cr.String()))
}
cr.extractedFiles, err = util.GetFSFromImage(constants.RootDir, cr.img, cr.extractFn)
layers, err := cr.img.Layers()
if err != nil {
return errors.Wrap(err, "retrieving image layers")
}
cr.extractedFiles, err = util.GetFSFromLayers(
constants.RootDir,
layers,
util.ExtractFunc(cr.extractFn),
util.IncludeWhiteout(),
)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}
@ -206,7 +217,10 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker
}
func (cr *CachingRunCommand) FilesToSnapshot() []string {
return cr.extractedFiles
f := cr.extractedFiles
logrus.Debugf("files extracted from caching run command %s", f)
return f
}
func (cr *CachingRunCommand) String() string {

View File

@ -40,7 +40,7 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
if err != nil {
return err
}
var groupStr string
groupStr := userStr
if len(userAndGroup) > 1 {
groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false)
if err != nil {
@ -48,9 +48,8 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
}
}
if groupStr != "" {
userStr = userStr + ":" + groupStr
}
userStr = userStr + ":" + groupStr
config.User = userStr
return nil
}

View File

@ -28,22 +28,23 @@ import (
var userTests = []struct {
user string
expectedUID string
expectedGID string
}{
{
user: "root",
expectedUID: "root",
expectedUID: "root:root",
},
{
user: "root-add",
expectedUID: "root-add",
expectedUID: "root-add:root-add",
},
{
user: "0",
expectedUID: "0",
expectedUID: "0:0",
},
{
user: "fakeUser",
expectedUID: "fakeUser",
expectedUID: "fakeUser:fakeUser",
},
{
user: "root:root",
@ -56,6 +57,7 @@ var userTests = []struct {
{
user: "root:0",
expectedUID: "root:0",
expectedGID: "f0",
},
{
user: "0:0",
@ -63,11 +65,15 @@ var userTests = []struct {
},
{
user: "$envuser",
expectedUID: "root",
expectedUID: "root:root",
},
{
user: "root:$envgroup",
expectedUID: "root:root",
expectedUID: "root:grp",
},
{
user: "some:grp",
expectedUID: "some:grp",
},
}
@ -76,7 +82,7 @@ func TestUpdateUser(t *testing.T) {
cfg := &v1.Config{
Env: []string{
"envuser=root",
"envgroup=root",
"envgroup=grp",
},
}
cmd := UserCommand{

View File

@ -39,8 +39,13 @@ type KanikoOptions struct {
DigestFile string
ImageNameDigestFile string
OCILayoutPath string
RegistryMirror string
InsecureRegistries multiArg
SkipTLSVerifyRegistries multiArg
Destinations multiArg
BuildArgs multiArg
InsecureRegistries multiArg
SkipTLSVerifyRegistries multiArg
Insecure bool
SkipTLSVerify bool
InsecurePull bool
@ -50,9 +55,7 @@ type KanikoOptions struct {
NoPush bool
Cache bool
Cleanup bool
InsecureRegistries multiArg
SkipTLSVerifyRegistries multiArg
RegistryMirror string
WhitelistVarRun bool
}
// WarmerOptions are options that are set by command line arguments to the cache warmer.

View File

@ -90,14 +90,17 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
// baseImageIndex returns the index of the stage the current stage is built off
// returns -1 if the current stage isn't built off a previous stage
func baseImageIndex(currentStage int, stages []instructions.Stage) int {
currentStageBaseName := strings.ToLower(stages[currentStage].BaseName)
for i, stage := range stages {
if i > currentStage {
break
}
if stage.Name == stages[currentStage].BaseName {
if stage.Name == currentStageBaseName {
return i
}
}
return -1
}
@ -245,15 +248,19 @@ func ParseCommands(cmdArray []string) ([]instructions.Command, error) {
// saveStage returns true if the current stage will be needed later in the Dockerfile
func saveStage(index int, stages []instructions.Stage) bool {
currentStageName := stages[index].Name
for stageIndex, stage := range stages {
if stageIndex <= index {
continue
}
if stage.BaseName == stages[index].Name {
if strings.ToLower(stage.BaseName) == currentStageName {
if stage.BaseName != "" {
return true
}
}
}
return false
}

View File

@ -23,8 +23,6 @@ import (
"strconv"
"time"
otiai10Cpy "github.com/otiai10/copy"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
@ -158,8 +156,10 @@ func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string
compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
}
srcCtx := s.opts.SrcContext
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
if err := compositeKey.AddPath(f, srcCtx); err != nil {
return compositeKey, err
}
}
@ -574,8 +574,8 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}
for _, p := range filesToSave {
logrus.Infof("Saving file %s for later use.", p)
otiai10Cpy.Copy(p, filepath.Join(dstDir, p))
logrus.Infof("Saving file %s for later use", p)
util.CopyFileOrSymlink(p, dstDir)
}
// Delete the filesystem
@ -587,16 +587,23 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}
// filesToSave returns all the files matching the given pattern in deps.
// If a file is a symlink, it also returns the target file.
func filesToSave(deps []string) ([]string, error) {
allFiles := []string{}
srcFiles := []string{}
for _, src := range deps {
srcs, err := filepath.Glob(src)
if err != nil {
return nil, err
}
allFiles = append(allFiles, srcs...)
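// Keep the symlink target (when it resolves) alongside the file itself so both are saved.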
for _, f := range srcs {
if link, err := util.EvalSymLink(f); err == nil {
srcFiles = append(srcFiles, link)
}
srcFiles = append(srcFiles, f)
}
}
return allFiles, nil
return srcFiles, nil
}
func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error {
@ -684,7 +691,7 @@ func getHasher(snapshotMode string) (func(string) (string, error), error) {
}
func resolveOnBuild(stage *config.KanikoStage, config *v1.Config) error {
if config.OnBuild == nil {
if config.OnBuild == nil || len(config.OnBuild) == 0 {
return nil
}
// Otherwise, parse into commands

View File

@ -538,7 +538,7 @@ func Test_stageBuilder_build(t *testing.T) {
filePath := filepath.Join(dir, file)
ch := NewCompositeCache("", "meow")
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
@ -570,7 +570,7 @@ func Test_stageBuilder_build(t *testing.T) {
filePath := filepath.Join(dir, file)
ch := NewCompositeCache("", "meow")
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
@ -618,7 +618,7 @@ func Test_stageBuilder_build(t *testing.T) {
tarContent := generateTar(t, dir, filename)
ch := NewCompositeCache("", "")
ch.AddPath(filepath)
ch.AddPath(filepath, "")
hash, err := ch.Hash()
if err != nil {
@ -662,7 +662,7 @@ func Test_stageBuilder_build(t *testing.T) {
}
filePath := filepath.Join(dir, filename)
ch := NewCompositeCache("", "")
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash, err := ch.Hash()
if err != nil {
@ -713,7 +713,7 @@ func Test_stageBuilder_build(t *testing.T) {
}
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash2, err := ch.Hash()
if err != nil {
@ -721,7 +721,7 @@ func Test_stageBuilder_build(t *testing.T) {
}
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
ch.AddPath(filePath, "")
image := fakeImage{
ImageLayers: []v1.Layer{
@ -777,14 +777,14 @@ COPY %s bar.txt
}
filePath := filepath.Join(dir, filename)
ch := NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash1, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
}
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
ch.AddPath(filePath, "")
hash2, err := ch.Hash()
if err != nil {
@ -792,7 +792,7 @@ COPY %s bar.txt
}
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
ch.AddPath(filePath, "")
image := fakeImage{
ImageLayers: []v1.Layer{

View File

@ -54,18 +54,28 @@ func (s *CompositeCache) Hash() (string, error) {
return util.SHA256(strings.NewReader(s.Key()))
}
func (s *CompositeCache) AddPath(p string) error {
func (s *CompositeCache) AddPath(p, context string) error {
sha := sha256.New()
fi, err := os.Lstat(p)
if err != nil {
return err
}
if fi.Mode().IsDir() {
k, err := HashDir(p)
empty, k, err := hashDir(p, context)
if err != nil {
return err
}
s.keys = append(s.keys, k)
// Only add the hash of this directory to the key
// if there is any whitelisted content.
if !empty || !util.ExcludeFile(p, context) {
s.keys = append(s.keys, k)
}
return nil
}
if util.ExcludeFile(p, context) {
return nil
}
fh, err := util.CacheHasher()(p)
@ -81,12 +91,18 @@ func (s *CompositeCache) AddPath(p string) error {
}
// HashDir returns a hash of the directory.
func HashDir(p string) (string, error) {
func hashDir(p, context string) (bool, string, error) {
sha := sha256.New()
empty := true
if err := filepath.Walk(p, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
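// Skip paths excluded by the build context's .dockerignore.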
exclude := util.ExcludeFile(path, context)
if exclude {
return nil
}
fileHash, err := util.CacheHasher()(path)
if err != nil {
return err
@ -94,10 +110,11 @@ func HashDir(p string) (string, error) {
if _, err := sha.Write([]byte(fileHash)); err != nil {
return err
}
empty = false
return nil
}); err != nil {
return "", err
return false, "", err
}
return fmt.Sprintf("%x", sha.Sum(nil)), nil
return empty, fmt.Sprintf("%x", sha.Sum(nil)), nil
}

View File

@ -19,9 +19,12 @@ package executor
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"reflect"
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
)
func Test_NewCompositeCache(t *testing.T) {
@ -77,7 +80,7 @@ func Test_CompositeCache_AddPath_dir(t *testing.T) {
fn := func() string {
r := NewCompositeCache()
if err := r.AddPath(tmpDir); err != nil {
if err := r.AddPath(tmpDir, ""); err != nil {
t.Errorf("expected error to be nil but was %v", err)
}
@ -115,7 +118,7 @@ func Test_CompositeCache_AddPath_file(t *testing.T) {
p := tmpfile.Name()
fn := func() string {
r := NewCompositeCache()
if err := r.AddPath(p); err != nil {
if err := r.AddPath(p, ""); err != nil {
t.Errorf("expected error to be nil but was %v", err)
}
@ -135,3 +138,433 @@ func Test_CompositeCache_AddPath_file(t *testing.T) {
t.Errorf("expected hash %v to equal hash %v", hash1, hash2)
}
}
func createFilesystemStructure(root string, directories, files []string) error {
for _, d := range directories {
dirPath := path.Join(root, d)
if err := os.MkdirAll(dirPath, 0755); err != nil {
return err
}
}
for _, fileName := range files {
filePath := path.Join(root, fileName)
err := ioutil.WriteFile(filePath, []byte(fileName), 0644)
if err != nil {
return err
}
}
return nil
}
func setIgnoreContext(content string) error {
dockerIgnoreDir, err := ioutil.TempDir("", "")
if err != nil {
return err
}
defer os.RemoveAll(dockerIgnoreDir)
err = ioutil.WriteFile(dockerIgnoreDir+".dockerignore", []byte(content), 0644)
if err != nil {
return err
}
err = util.GetExcludedFiles(dockerIgnoreDir, "")
if err != nil {
return err
}
return nil
}
func hashDirectory(dirpath string) (string, error) {
cache1 := NewCompositeCache()
err := cache1.AddPath(dirpath, dirpath)
if err != nil {
return "", err
}
hash, err := cache1.Hash()
if err != nil {
return "", err
}
return hash, nil
}
func Test_CompositeKey_AddPath_Works(t *testing.T) {
tests := []struct {
name string
directories []string
files []string
}{
{
name: "empty",
directories: []string{},
files: []string{},
},
{
name: "dirs",
directories: []string{"foo", "bar", "foobar", "f/o/o"},
files: []string{},
},
{
name: "files",
directories: []string{},
files: []string{"foo", "bar", "foobar"},
},
{
name: "all",
directories: []string{"foo", "bar"},
files: []string{"foo/bar", "bar/baz", "foobar"},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testDir1, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir1)
err = createFilesystemStructure(testDir1, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
testDir2, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir2)
err = createFilesystemStructure(testDir2, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
hash1, err := hashDirectory(testDir1)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
hash2, err := hashDirectory(testDir2)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
if hash1 != hash2 {
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
}
})
}
}
func Test_CompositeKey_AddPath_WithExtraFile_Works(t *testing.T) {
tests := []struct {
name string
directories []string
files []string
extraFile string
}{
{
name: "empty",
directories: []string{},
files: []string{},
extraFile: "file",
},
{
name: "dirs",
directories: []string{"foo", "bar", "foobar", "f/o/o"},
files: []string{},
extraFile: "f/o/o/extra",
},
{
name: "files",
directories: []string{},
files: []string{"foo", "bar", "foobar"},
extraFile: "foo.extra",
},
{
name: "all",
directories: []string{"foo", "bar"},
files: []string{"foo/bar", "bar/baz", "foobar"},
extraFile: "bar/extra",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testDir1, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir1)
err = createFilesystemStructure(testDir1, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
testDir2, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir2)
err = createFilesystemStructure(testDir2, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
extraPath := path.Join(testDir2, test.extraFile)
err = ioutil.WriteFile(extraPath, []byte(test.extraFile), 0644)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
hash1, err := hashDirectory(testDir1)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
hash2, err := hashDirectory(testDir2)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
if hash1 == hash2 {
t.Errorf("Expected different hashes, got: %s and %s", hash1, hash2)
}
})
}
}
func Test_CompositeKey_AddPath_WithExtraDir_Works(t *testing.T) {
tests := []struct {
name string
directories []string
files []string
extraDir string
}{
{
name: "empty",
directories: []string{},
files: []string{},
extraDir: "extra",
},
{
name: "dirs",
directories: []string{"foo", "bar", "foobar", "f/o/o"},
files: []string{},
extraDir: "f/o/o/extra",
},
{
name: "files",
directories: []string{},
files: []string{"foo", "bar", "foobar"},
extraDir: "foo.extra",
},
{
name: "all",
directories: []string{"foo", "bar"},
files: []string{"foo/bar", "bar/baz", "foobar"},
extraDir: "bar/extra",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testDir1, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir1)
err = createFilesystemStructure(testDir1, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
testDir2, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir2)
err = createFilesystemStructure(testDir2, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
extraPath := path.Join(testDir2, test.extraDir)
err = os.MkdirAll(extraPath, 0644)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
hash1, err := hashDirectory(testDir1)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
hash2, err := hashDirectory(testDir2)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
if hash1 == hash2 {
t.Errorf("Expected different hashes, got: %s and %s", hash1, hash2)
}
})
}
}
func Test_CompositeKey_AddPath_WithExtraFilIgnored_Works(t *testing.T) {
tests := []struct {
name string
directories []string
files []string
extraFile string
}{
{
name: "empty",
directories: []string{},
files: []string{},
extraFile: "extra",
},
{
name: "dirs",
directories: []string{"foo", "bar", "foobar", "f/o/o"},
files: []string{},
extraFile: "f/o/o/extra",
},
{
name: "files",
directories: []string{},
files: []string{"foo", "bar", "foobar"},
extraFile: "extra",
},
{
name: "all",
directories: []string{"foo", "bar"},
files: []string{"foo/bar", "bar/baz", "foobar"},
extraFile: "bar/extra",
},
}
err := setIgnoreContext("**/extra")
if err != nil {
t.Fatalf("Error setting exlusion context: %s", err)
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testDir1, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir1)
err = createFilesystemStructure(testDir1, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
testDir2, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir2)
err = createFilesystemStructure(testDir2, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
extraPath := path.Join(testDir2, test.extraFile)
err = ioutil.WriteFile(extraPath, []byte(test.extraFile), 0644)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
hash1, err := hashDirectory(testDir1)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
hash2, err := hashDirectory(testDir2)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
if hash1 != hash2 {
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
}
})
}
}
func Test_CompositeKey_AddPath_WithExtraDirIgnored_Works(t *testing.T) {
tests := []struct {
name string
directories []string
files []string
extraDir string
}{
{
name: "empty",
directories: []string{},
files: []string{},
extraDir: "extra",
},
{
name: "dirs",
directories: []string{"foo", "bar", "foobar", "f/o/o"},
files: []string{},
extraDir: "f/o/o/extra",
},
{
name: "files",
directories: []string{},
files: []string{"foo", "bar", "foobar"},
extraDir: "extra",
},
{
name: "all",
directories: []string{"foo", "bar"},
files: []string{"foo/bar", "bar/baz", "foobar"},
extraDir: "bar/extra",
},
}
err := setIgnoreContext("**/extra")
if err != nil {
t.Fatalf("Error setting exlusion context: %s", err)
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testDir1, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir1)
err = createFilesystemStructure(testDir1, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
testDir2, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
}
defer os.RemoveAll(testDir2)
err = createFilesystemStructure(testDir2, test.directories, test.files)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
extraPath := path.Join(testDir2, test.extraDir)
err = os.MkdirAll(extraPath, 0644)
if err != nil {
t.Fatalf("Error creating filesytem structure: %s", err)
}
hash1, err := hashDirectory(testDir1)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
hash2, err := hashDirectory(testDir2)
if err != nil {
t.Fatalf("Failed to calculate hash: %s", err)
}
if hash1 != hash2 {
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
}
})
}
}

View File

@ -0,0 +1,143 @@
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/google/go-containerregistry/pkg/v1 (interfaces: Layer)
// Package mockv1 is a generated GoMock package.
package mockv1
import (
io "io"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
v1 "github.com/google/go-containerregistry/pkg/v1"
types "github.com/google/go-containerregistry/pkg/v1/types"
)
// MockLayer is a mock of Layer interface
type MockLayer struct {
ctrl *gomock.Controller
recorder *MockLayerMockRecorder
}
// MockLayerMockRecorder is the mock recorder for MockLayer
type MockLayerMockRecorder struct {
mock *MockLayer
}
// NewMockLayer creates a new mock instance
func NewMockLayer(ctrl *gomock.Controller) *MockLayer {
mock := &MockLayer{ctrl: ctrl}
mock.recorder = &MockLayerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockLayer) EXPECT() *MockLayerMockRecorder {
return m.recorder
}
// Compressed mocks base method
func (m *MockLayer) Compressed() (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Compressed")
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Compressed indicates an expected call of Compressed
func (mr *MockLayerMockRecorder) Compressed() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compressed", reflect.TypeOf((*MockLayer)(nil).Compressed))
}
// DiffID mocks base method
func (m *MockLayer) DiffID() (v1.Hash, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DiffID")
ret0, _ := ret[0].(v1.Hash)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// DiffID indicates an expected call of DiffID
func (mr *MockLayerMockRecorder) DiffID() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffID", reflect.TypeOf((*MockLayer)(nil).DiffID))
}
// Digest mocks base method
func (m *MockLayer) Digest() (v1.Hash, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Digest")
ret0, _ := ret[0].(v1.Hash)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Digest indicates an expected call of Digest
func (mr *MockLayerMockRecorder) Digest() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Digest", reflect.TypeOf((*MockLayer)(nil).Digest))
}
// MediaType mocks base method
func (m *MockLayer) MediaType() (types.MediaType, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "MediaType")
ret0, _ := ret[0].(types.MediaType)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// MediaType indicates an expected call of MediaType
func (mr *MockLayerMockRecorder) MediaType() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MediaType", reflect.TypeOf((*MockLayer)(nil).MediaType))
}
// Size mocks base method
func (m *MockLayer) Size() (int64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Size")
ret0, _ := ret[0].(int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Size indicates an expected call of Size
func (mr *MockLayerMockRecorder) Size() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockLayer)(nil).Size))
}
// Uncompressed mocks base method
func (m *MockLayer) Uncompressed() (io.ReadCloser, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Uncompressed")
ret0, _ := ret[0].(io.ReadCloser)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Uncompressed indicates an expected call of Uncompressed
func (mr *MockLayerMockRecorder) Uncompressed() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uncompressed", reflect.TypeOf((*MockLayer)(nil).Uncompressed))
}

View File

@ -19,6 +19,7 @@ package snapshot
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"syscall"
@ -112,7 +113,6 @@ func (s *Snapshotter) TakeSnapshotFS() (string, error) {
if err := writeToTar(t, filesToAdd, filesToWhiteOut); err != nil {
return "", err
}
return f.Name(), nil
}
@ -179,8 +179,13 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
return nil, nil, fmt.Errorf("could not check if file has changed %s %s", path, err)
}
if fileChanged {
logrus.Tracef("Adding %s to layer, because it was changed.", path)
filesToAdd = append(filesToAdd, path)
// Get target file for symlinks so the symlink is not a dead link.
files, err := filesWithLinks(path)
if err != nil {
return nil, nil, err
}
logrus.Tracef("Adding files %s to layer, because it was changed.", files)
filesToAdd = append(filesToAdd, files...)
}
}
@ -188,14 +193,12 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
filesToAdd = filesWithParentDirs(filesToAdd)
sort.Strings(filesToAdd)
// Add files to the layered map
for _, file := range filesToAdd {
if err := s.l.Add(file); err != nil {
return nil, nil, fmt.Errorf("unable to add file %s to layered map: %s", file, err)
}
}
return filesToAdd, filesToWhiteOut, nil
}
@ -236,3 +239,21 @@ func filesWithParentDirs(files []string) []string {
return newFiles
}
// filesWithLinks returns the symlink and the target path, if the target exists.
func filesWithLinks(path string) ([]string, error) {
link, err := util.GetSymLink(path)
if err == util.ErrNotSymLink {
return []string{path}, nil
} else if err != nil {
return nil, err
}
// Resolve relative links and add the target only if it exists in the FS
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(path), link)
}
if _, err := os.Stat(link); err != nil {
return []string{path}, nil
}
return []string{path, link}, nil
}

View File

@ -31,7 +31,7 @@ import (
)
func TestSnapshotFSFileChange(t *testing.T) {
testDir, snapshotter, cleanup, err := setUpTestDir()
testDir, snapshotter, cleanup, err := setUpTest()
testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/")
defer cleanup()
if err != nil {
@ -90,7 +90,7 @@ func TestSnapshotFSFileChange(t *testing.T) {
}
func TestSnapshotFSIsReproducible(t *testing.T) {
testDir, snapshotter, cleanup, err := setUpTestDir()
testDir, snapshotter, cleanup, err := setUpTest()
defer cleanup()
if err != nil {
t.Fatal(err)
@ -129,7 +129,7 @@ func TestSnapshotFSIsReproducible(t *testing.T) {
}
func TestSnapshotFSChangePermissions(t *testing.T) {
testDir, snapshotter, cleanup, err := setUpTestDir()
testDir, snapshotter, cleanup, err := setUpTest()
testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/")
defer cleanup()
if err != nil {
@ -180,7 +180,7 @@ func TestSnapshotFSChangePermissions(t *testing.T) {
}
func TestSnapshotFiles(t *testing.T) {
testDir, snapshotter, cleanup, err := setUpTestDir()
testDir, snapshotter, cleanup, err := setUpTest()
testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/")
defer cleanup()
if err != nil {
@ -230,7 +230,7 @@ func TestSnapshotFiles(t *testing.T) {
}
func TestEmptySnapshotFS(t *testing.T) {
_, snapshotter, cleanup, err := setUpTestDir()
_, snapshotter, cleanup, err := setUpTest()
if err != nil {
t.Fatal(err)
}
@ -253,12 +253,97 @@ func TestEmptySnapshotFS(t *testing.T) {
}
}
func setUpTestDir() (string, *Snapshotter, func(), error) {
testDir, err := ioutil.TempDir("", "")
if err != nil {
return "", nil, nil, errors.Wrap(err, "setting up temp dir")
func TestFileWithLinks(t *testing.T) {
link := "baz/link"
tcs := []struct {
name string
path string
linkFileTarget string
expected []string
shouldErr bool
}{
{
name: "given path is a symlink that points to a valid target",
path: link,
linkFileTarget: "file",
expected: []string{link, "baz/file"},
},
{
name: "given path is a symlink points to non existing path",
path: link,
linkFileTarget: "does-not-exists",
expected: []string{link},
},
{
name: "given path is a regular file",
path: "kaniko/file",
linkFileTarget: "file",
expected: []string{"kaniko/file"},
},
}
for _, tt := range tcs {
t.Run(tt.name, func(t *testing.T) {
testDir, cleanup, err := setUpTestDir()
if err != nil {
t.Fatal(err)
}
defer cleanup()
if err := setupSymlink(testDir, link, tt.linkFileTarget); err != nil {
t.Fatalf("could not set up symlink due to %s", err)
}
actual, err := filesWithLinks(filepath.Join(testDir, tt.path))
if err != nil {
t.Fatalf("unexpected error %s", err)
}
sortAndCompareFilepaths(t, testDir, tt.expected, actual)
})
}
}
func setupSymlink(dir string, link string, target string) error {
return os.Symlink(target, filepath.Join(dir, link))
}
func sortAndCompareFilepaths(t *testing.T, testDir string, expected []string, actual []string) {
expectedFullPaths := make([]string, len(expected))
for i, file := range expected {
expectedFullPaths[i] = filepath.Join(testDir, file)
}
sort.Strings(expectedFullPaths)
sort.Strings(actual)
testutil.CheckDeepEqual(t, expectedFullPaths, actual)
}
func setUpTestDir() (string, func(), error) {
testDir, err := ioutil.TempDir("", "")
if err != nil {
return "", nil, errors.Wrap(err, "setting up temp dir")
}
files := map[string]string{
"foo": "baz1",
"bar/bat": "baz2",
"kaniko/file": "file",
"baz/file": "testfile",
}
// Set up initial files
if err := testutil.SetupFiles(testDir, files); err != nil {
return "", nil, errors.Wrap(err, "setting up file system")
}
cleanup := func() {
os.RemoveAll(testDir)
}
return testDir, cleanup, nil
}
func setUpTest() (string, *Snapshotter, func(), error) {
testDir, dirCleanUp, err := setUpTestDir()
if err != nil {
return "", nil, nil, err
}
snapshotPath, err := ioutil.TempDir("", "")
if err != nil {
return "", nil, nil, errors.Wrap(err, "setting up temp dir")
@ -266,16 +351,6 @@ func setUpTestDir() (string, *Snapshotter, func(), error) {
snapshotPathPrefix = snapshotPath
files := map[string]string{
"foo": "baz1",
"bar/bat": "baz2",
"kaniko/file": "file",
}
// Set up initial files
if err := testutil.SetupFiles(testDir, files); err != nil {
return "", nil, nil, errors.Wrap(err, "setting up file system")
}
// Take the initial snapshot
l := NewLayeredMap(util.Hasher(), util.CacheHasher())
snapshotter := NewSnapshotter(l, testDir)
@ -285,7 +360,7 @@ func setUpTestDir() (string, *Snapshotter, func(), error) {
cleanup := func() {
os.RemoveAll(snapshotPath)
os.RemoveAll(testDir)
dirCleanUp()
}
return testDir, snapshotter, cleanup, nil

View File

@ -164,20 +164,25 @@ func IsDestDir(path string) bool {
// If dest is a dir, copy it to /dest/relpath
// If dest is a file, copy directly to dest
// If source is a dir:
// Assume dest is also a dir, and copy to dest/relpath
// Assume dest is also a dir, and copy to dest/
// If dest is not an absolute filepath, add /cwd to the beginning
func DestinationFilepath(src, dest, cwd string) (string, error) {
if IsDestDir(dest) {
destPath := filepath.Join(dest, filepath.Base(src))
if filepath.IsAbs(dest) {
return destPath, nil
}
return filepath.Join(cwd, destPath), nil
_, srcFileName := filepath.Split(src)
newDest := dest
if IsDestDir(newDest) {
newDest = filepath.Join(newDest, srcFileName)
}
if filepath.IsAbs(dest) {
return dest, nil
if !filepath.IsAbs(newDest) {
newDest = filepath.Join(cwd, newDest)
}
return filepath.Join(cwd, dest), nil
if len(srcFileName) <= 0 && !strings.HasSuffix(newDest, "/") {
newDest += "/"
}
return newDest, nil
}
// URLDestinationFilepath gives the destination a file from a remote URL should be saved to
@ -208,7 +213,7 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri
if !ContainsWildcards(srcs) {
totalSrcs := 0
for _, src := range srcs {
if excludeFile(src, root) {
if ExcludeFile(src, root) {
continue
}
totalSrcs++
@ -245,7 +250,7 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri
return errors.Wrap(err, "failed to get relative files")
}
for _, file := range files {
if excludeFile(file, root) {
if ExcludeFile(file, root) {
continue
}
totalFiles++

View File

@ -153,13 +153,13 @@ var destinationFilepathTests = []struct {
src: "context/bar/",
cwd: "/",
dest: "pkg/",
expectedFilepath: "/pkg/bar",
expectedFilepath: "/pkg/",
},
{
src: "context/bar/",
cwd: "/newdir",
dest: "pkg/",
expectedFilepath: "/newdir/pkg/bar",
expectedFilepath: "/newdir/pkg/",
},
{
src: "./context/empty",
@ -177,7 +177,7 @@ var destinationFilepathTests = []struct {
src: "./",
cwd: "/",
dest: "/dir",
expectedFilepath: "/dir",
expectedFilepath: "/dir/",
},
{
src: "context/foo",

View File

@ -30,6 +30,8 @@ import (
"syscall"
"time"
otiai10Cpy "github.com/otiai10/copy"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/docker/docker/builder/dockerignore"
"github.com/docker/docker/pkg/fileutils"
@ -48,13 +50,6 @@ var initialWhitelist = []WhitelistEntry{
Path: "/kaniko",
PrefixMatchOnly: false,
},
{
// /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar
// which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist
// in the image with no way to tell if it came from the base image or not.
Path: "/var/run",
PrefixMatchOnly: false,
},
{
// similarly, we whitelist /etc/mtab, since there is no way to know if the file was mounted or came
// from the base image
@ -71,25 +66,58 @@ var excluded []string
type ExtractFunction func(string, *tar.Header, io.Reader) error
type FSConfig struct {
includeWhiteout bool
extractFunc ExtractFunction
}
type FSOpt func(*FSConfig)
func IncludeWhiteout() FSOpt {
return func(opts *FSConfig) {
opts.includeWhiteout = true
}
}
func ExtractFunc(extractFunc ExtractFunction) FSOpt {
return func(opts *FSConfig) {
opts.extractFunc = extractFunc
}
}
// GetFSFromImage extracts the layers of img to root
// It returns a list of all files extracted
func GetFSFromImage(root string, img v1.Image, extract ExtractFunction) ([]string, error) {
if extract == nil {
return nil, errors.New("must supply an extract function")
}
if img == nil {
return nil, errors.New("image cannot be nil")
}
if err := DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return nil, err
}
logrus.Debugf("Mounted directories: %v", whitelist)
layers, err := img.Layers()
if err != nil {
return nil, err
}
extractedFiles := []string{}
return GetFSFromLayers(root, layers, ExtractFunc(extract))
}
func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, error) {
cfg := new(FSConfig)
for _, opt := range opts {
opt(cfg)
}
if cfg.extractFunc == nil {
return nil, errors.New("must supply an extract function")
}
if err := DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return nil, err
}
logrus.Debugf("Mounted directories: %v", whitelist)
extractedFiles := []string{}
for i, l := range layers {
if mediaType, err := l.MediaType(); err == nil {
logrus.Tracef("Extracting layer %d of media type %s", i, mediaType)
@ -102,29 +130,41 @@ func GetFSFromImage(root string, img v1.Image, extract ExtractFunction) ([]strin
return nil, err
}
defer r.Close()
tr := tar.NewReader(r)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("error reading tar %d", i))
}
path := filepath.Join(root, filepath.Clean(hdr.Name))
base := filepath.Base(path)
dir := filepath.Dir(path)
if strings.HasPrefix(base, ".wh.") {
logrus.Debugf("Whiting out %s", path)
name := strings.TrimPrefix(base, ".wh.")
if err := os.RemoveAll(filepath.Join(dir, name)); err != nil {
return nil, errors.Wrapf(err, "removing whiteout %s", hdr.Name)
}
continue
if !cfg.includeWhiteout {
logrus.Debug("not including whiteout files")
continue
}
}
if err := extract(root, hdr, tr); err != nil {
if err := cfg.extractFunc(root, hdr, tr); err != nil {
return nil, err
}
extractedFiles = append(extractedFiles, filepath.Join(root, filepath.Clean(hdr.Name)))
}
}
@ -190,7 +230,7 @@ func unTar(r io.Reader, dest string) ([]string, error) {
if err := ExtractFile(dest, hdr, tr); err != nil {
return nil, err
}
extractedFiles = append(extractedFiles, dest)
extractedFiles = append(extractedFiles, filepath.Join(dest, filepath.Clean(hdr.Name)))
}
return extractedFiles, nil
}
@ -215,6 +255,7 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
switch hdr.Typeflag {
case tar.TypeReg:
logrus.Tracef("creating file %s", path)
// It's possible a file is in the tar before its directory,
// or a file was copied over a directory prior to now
fi, err := os.Stat(dir)
@ -225,6 +266,7 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
return err
}
}
// Check if something already exists at path (symlinks etc.)
// If so, delete it
if FilepathExists(path) {
@ -232,16 +274,26 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
return errors.Wrapf(err, "error removing %s to make way for new file.", path)
}
}
currFile, err := os.Create(path)
if err != nil {
return err
}
if _, err = io.Copy(currFile, tr); err != nil {
return err
}
if err = setFilePermissions(path, mode, uid, gid); err != nil {
return err
}
// We set AccessTime because it's a required arg but we only care about
// ModTime. The file will get accessed again so AccessTime will change.
if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return err
}
currFile.Close()
case tar.TypeDir:
logrus.Tracef("creating dir %s", path)
@ -431,18 +483,8 @@ func FilepathExists(path string) bool {
// CreateFile creates a file at path and copies over contents from the reader
func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid uint32) error {
// Create directory path if it doesn't exist
baseDir := filepath.Dir(path)
if info, err := os.Lstat(baseDir); os.IsNotExist(err) {
logrus.Tracef("baseDir %s for file %s does not exist. Creating.", baseDir, path)
if err := os.MkdirAll(baseDir, 0755); err != nil {
return err
}
} else {
switch mode := info.Mode(); {
case mode&os.ModeSymlink != 0:
logrus.Infof("destination cannot be a symlink %v", baseDir)
return errors.New("destination cannot be a symlink")
}
if err := createParentDirectory(path); err != nil {
return err
}
dest, err := os.Create(path)
if err != nil {
@ -502,9 +544,10 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) {
fullPath := filepath.Join(src, file)
fi, err := os.Lstat(fullPath)
if err != nil {
fmt.Println(" i am returning from here this", err)
return nil, err
}
if excludeFile(fullPath, buildcontext) {
if ExcludeFile(fullPath, buildcontext) {
logrus.Debugf("%s found in .dockerignore, ignoring", src)
continue
}
@ -519,7 +562,7 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) {
if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil {
return nil, err
}
} else if fi.Mode()&os.ModeSymlink != 0 {
} else if IsSymlink(fi) {
// If file is a symlink, we want to create the same relative symlink
if _, err := CopySymlink(fullPath, destPath, buildcontext); err != nil {
return nil, err
@ -537,7 +580,7 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) {
// CopySymlink copies the symlink at src to dest
func CopySymlink(src, dest, buildcontext string) (bool, error) {
if excludeFile(src, buildcontext) {
if ExcludeFile(src, buildcontext) {
logrus.Debugf("%s found in .dockerignore, ignoring", src)
return true, nil
}
@ -550,12 +593,15 @@ func CopySymlink(src, dest, buildcontext string) (bool, error) {
return false, err
}
}
if err := createParentDirectory(dest); err != nil {
return false, err
}
return false, os.Symlink(link, dest)
}
// CopyFile copies the file at src to dest
func CopyFile(src, dest, buildcontext string) (bool, error) {
if excludeFile(src, buildcontext) {
if ExcludeFile(src, buildcontext) {
logrus.Debugf("%s found in .dockerignore, ignoring", src)
return true, nil
}
@ -599,8 +645,8 @@ func GetExcludedFiles(dockerfilepath string, buildcontext string) error {
return err
}
// excludeFile returns true if the .dockerignore specified this file should be ignored
func excludeFile(path, buildcontext string) bool {
// ExcludeFile returns true if the .dockerignore specifies that this file should be ignored
func ExcludeFile(path, buildcontext string) bool {
if HasFilepathPrefix(path, buildcontext, false) {
var err error
path, err = filepath.Rel(buildcontext, path)
@ -647,6 +693,7 @@ func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int) error
if err := os.MkdirAll(path, mode); err != nil {
return err
}
if err := os.Chown(path, uid, gid); err != nil {
return err
}
@ -677,3 +724,78 @@ func CreateTargetTarfile(tarpath string) (*os.File, error) {
return os.Create(tarpath)
}
// Returns true if a file is a symlink
func IsSymlink(fi os.FileInfo) bool {
return fi.Mode()&os.ModeSymlink != 0
}
var ErrNotSymLink = fmt.Errorf("not a symlink")
func GetSymLink(path string) (string, error) {
if err := getSymlink(path); err != nil {
return "", err
}
return os.Readlink(path)
}
func EvalSymLink(path string) (string, error) {
if err := getSymlink(path); err != nil {
return "", err
}
return filepath.EvalSymlinks(path)
}
func getSymlink(path string) error {
fi, err := os.Lstat(path)
if err != nil {
return err
}
if !IsSymlink(fi) {
return ErrNotSymLink
}
return nil
}
// For cross stage dependencies kaniko must persist the referenced path so that it can be used in
// the dependent stage. For symlinks we copy the target path because copying the symlink would
// result in a dead link
func CopyFileOrSymlink(src string, destDir string) error {
destFile := filepath.Join(destDir, src)
if fi, _ := os.Lstat(src); IsSymlink(fi) {
link, err := os.Readlink(src)
if err != nil {
return err
}
return os.Symlink(link, destFile)
}
return otiai10Cpy.Copy(src, destFile)
}
func createParentDirectory(path string) error {
baseDir := filepath.Dir(path)
if info, err := os.Lstat(baseDir); os.IsNotExist(err) {
logrus.Tracef("baseDir %s for file %s does not exist. Creating.", baseDir, path)
if err := os.MkdirAll(baseDir, 0755); err != nil {
return err
}
} else if IsSymlink(info) {
logrus.Infof("destination cannot be a symlink %v", baseDir)
return errors.New("destination cannot be a symlink")
}
return nil
}
// UpdateWhitelist appends /var/run to the whitelisted paths when whitelistVarRun is true
func UpdateWhitelist(whitelistVarRun bool) {
if !whitelistVarRun {
return
}
whitelist = append(initialWhitelist, WhitelistEntry{
// /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar
// which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist
// in the image with no way to tell if it came from the base image or not.
Path: "/var/run",
PrefixMatchOnly: false,
})
}

View File

@ -20,6 +20,7 @@ import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@ -27,8 +28,13 @@ import (
"sort"
"strings"
"testing"
"time"
"github.com/GoogleContainerTools/kaniko/pkg/mocks/go-containerregistry/mockv1"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/golang/mock/gomock"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
func Test_DetectFilesystemWhitelist(t *testing.T) {
@ -58,7 +64,6 @@ func Test_DetectFilesystemWhitelist(t *testing.T) {
{"/dev", false},
{"/dev/pts", false},
{"/sys", false},
{"/var/run", false},
{"/etc/mtab", false},
}
actualWhitelist := whitelist
@ -445,6 +450,19 @@ func fileMatches(p string, c []byte) checker {
}
}
func timesMatch(p string, fTime time.Time) checker {
return func(root string, t *testing.T) {
fi, err := os.Stat(filepath.Join(root, p))
if err != nil {
t.Fatalf("error statting file %s", p)
}
if fi.ModTime().UTC() != fTime.UTC() {
t.Errorf("Expected modtime to equal %v but was %v", fTime, fi.ModTime())
}
}
}
func permissionsMatch(p string, perms os.FileMode) checker {
return func(root string, t *testing.T) {
fi, err := os.Stat(filepath.Join(root, p))
@ -488,14 +506,16 @@ func filesAreHardlinks(first, second string) checker {
}
}
func fileHeader(name string, contents string, mode int64) *tar.Header {
func fileHeader(name string, contents string, mode int64, fTime time.Time) *tar.Header {
return &tar.Header{
Name: name,
Size: int64(len(contents)),
Mode: mode,
Typeflag: tar.TypeReg,
Uid: os.Getuid(),
Gid: os.Getgid(),
Name: name,
Size: int64(len(contents)),
Mode: mode,
Typeflag: tar.TypeReg,
Uid: os.Getuid(),
Gid: os.Getgid(),
AccessTime: fTime,
ModTime: fTime,
}
}
@ -528,6 +548,87 @@ func dirHeader(name string, mode int64) *tar.Header {
}
}
func createUncompressedTar(fileContents map[string]string, tarFileName, testDir string) error {
if err := testutil.SetupFiles(testDir, fileContents); err != nil {
return err
}
tarFile, err := os.Create(filepath.Join(testDir, tarFileName))
if err != nil {
return err
}
t := NewTar(tarFile)
defer t.Close()
for file := range fileContents {
filePath := filepath.Join(testDir, file)
if err := t.AddFileToTar(filePath); err != nil {
return err
}
}
return nil
}
func Test_unTar(t *testing.T) {
tcs := []struct {
name string
setupTarContents map[string]string
tarFileName string
destination string
expectedFileList []string
errorExpected bool
}{
{
name: "multfile tar",
setupTarContents: map[string]string{
"foo/file1": "hello World",
"bar/file1": "hello World",
"bar/file2": "hello World",
"file1": "hello World",
},
tarFileName: "test.tar",
destination: "/",
expectedFileList: []string{"foo/file1", "bar/file1", "bar/file2", "file1"},
errorExpected: false,
},
{
name: "empty tar",
setupTarContents: map[string]string{},
tarFileName: "test.tar",
destination: "/",
expectedFileList: nil,
errorExpected: false,
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
testDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(testDir)
if err := createUncompressedTar(tc.setupTarContents, tc.tarFileName, testDir); err != nil {
t.Fatal(err)
}
file, err := os.Open(filepath.Join(testDir, tc.tarFileName))
if err != nil {
t.Fatal(err)
}
fileList, err := unTar(file, tc.destination)
if err != nil {
t.Fatal(err)
}
// update expectedFileList to account for the temp directory
for i, file := range tc.expectedFileList {
tc.expectedFileList[i] = filepath.Join(testDir, file)
}
// sort both slices to ensure objects are in the same order for deep equals
sort.Strings(tc.expectedFileList)
sort.Strings(fileList)
testutil.CheckErrorAndDeepEqual(t, tc.errorExpected, err, tc.expectedFileList, fileList)
})
}
}
func TestExtractFile(t *testing.T) {
type tc struct {
name string
@ -537,21 +638,27 @@ func TestExtractFile(t *testing.T) {
checkers []checker
}
defaultTestTime, err := time.Parse(time.RFC3339, "1912-06-23T00:00:00Z")
if err != nil {
t.Fatal(err)
}
tcs := []tc{
{
name: "normal file",
contents: []byte("helloworld"),
hdrs: []*tar.Header{fileHeader("./bar", "helloworld", 0644)},
hdrs: []*tar.Header{fileHeader("./bar", "helloworld", 0644, defaultTestTime)},
checkers: []checker{
fileExists("/bar"),
fileMatches("/bar", []byte("helloworld")),
permissionsMatch("/bar", 0644),
timesMatch("/bar", defaultTestTime),
},
},
{
name: "normal file, directory does not exist",
contents: []byte("helloworld"),
hdrs: []*tar.Header{fileHeader("./foo/bar", "helloworld", 0644)},
hdrs: []*tar.Header{fileHeader("./foo/bar", "helloworld", 0644, defaultTestTime)},
checkers: []checker{
fileExists("/foo/bar"),
fileMatches("/foo/bar", []byte("helloworld")),
@ -563,7 +670,7 @@ func TestExtractFile(t *testing.T) {
name: "normal file, directory is created after",
contents: []byte("helloworld"),
hdrs: []*tar.Header{
fileHeader("./foo/bar", "helloworld", 0644),
fileHeader("./foo/bar", "helloworld", 0644, defaultTestTime),
dirHeader("./foo", 0722),
},
checkers: []checker{
@ -607,7 +714,7 @@ func TestExtractFile(t *testing.T) {
name: "hardlink",
tmpdir: "/tmp/hardlink",
hdrs: []*tar.Header{
fileHeader("/bin/gzip", "gzip-binary", 0751),
fileHeader("/bin/gzip", "gzip-binary", 0751, defaultTestTime),
hardlinkHeader("/bin/uncompress", "/bin/gzip"),
},
checkers: []checker{
@ -618,7 +725,7 @@ func TestExtractFile(t *testing.T) {
{
name: "file with setuid bit",
contents: []byte("helloworld"),
hdrs: []*tar.Header{fileHeader("./bar", "helloworld", 04644)},
hdrs: []*tar.Header{fileHeader("./bar", "helloworld", 04644, defaultTestTime)},
checkers: []checker{
fileExists("/bar"),
fileMatches("/bar", []byte("helloworld")),
@ -630,7 +737,7 @@ func TestExtractFile(t *testing.T) {
contents: []byte("helloworld"),
hdrs: []*tar.Header{
dirHeader("./foo", 01755),
fileHeader("./foo/bar", "helloworld", 0644),
fileHeader("./foo/bar", "helloworld", 0644, defaultTestTime),
},
checkers: []checker{
fileExists("/foo/bar"),
@ -698,6 +805,9 @@ func TestCopySymlink(t *testing.T) {
tc := tc
t.Parallel()
r, err := ioutil.TempDir("", "")
os.MkdirAll(filepath.Join(r, filepath.Dir(tc.linkTarget)), 0777)
tc.linkTarget = filepath.Join(r, tc.linkTarget)
ioutil.WriteFile(tc.linkTarget, nil, 0644)
if err != nil {
t.Fatal(err)
}
@ -811,14 +921,14 @@ func Test_correctDockerignoreFileIsUsed(t *testing.T) {
}
for _, excl := range tt.args.excluded {
t.Run(tt.name+" to exclude "+excl, func(t *testing.T) {
if !excludeFile(excl, tt.args.buildcontext) {
if !ExcludeFile(excl, tt.args.buildcontext) {
t.Errorf("'%v' not excluded", excl)
}
})
}
for _, incl := range tt.args.included {
t.Run(tt.name+" to include "+incl, func(t *testing.T) {
if excludeFile(incl, tt.args.buildcontext) {
if ExcludeFile(incl, tt.args.buildcontext) {
t.Errorf("'%v' not included", incl)
}
})
@ -863,3 +973,325 @@ func Test_CopyFile_skips_self(t *testing.T) {
t.Fatalf("expected file contents to be %q, but got %q", expected, actual)
}
}
func fakeExtract(dest string, hdr *tar.Header, tr io.Reader) error {
return nil
}
func Test_GetFSFromLayers_with_whiteouts_include_whiteout_enabled(t *testing.T) {
ctrl := gomock.NewController(t)
root, err := ioutil.TempDir("", "layers-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(root)
opts := []FSOpt{
// I'd rather use the real func (util.ExtractFile)
// but you have to be root to chown
ExtractFunc(fakeExtract),
IncludeWhiteout(),
}
expectErr := false
f := func(expectedFiles []string, tw *tar.Writer) {
for _, f := range expectedFiles {
f := strings.TrimPrefix(strings.TrimPrefix(f, root), "/")
hdr := &tar.Header{
Name: f,
Mode: 0644,
Size: int64(len("Hello World\n")),
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err := tw.Write([]byte("Hello World\n")); err != nil {
t.Fatal(err)
}
}
if err := tw.Close(); err != nil {
t.Fatal(err)
}
}
expectedFiles := []string{
filepath.Join(root, "foobar"),
}
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
f(expectedFiles, tw)
mockLayer := mockv1.NewMockLayer(ctrl)
mockLayer.EXPECT().MediaType().Return(types.OCILayer, nil)
rc := ioutil.NopCloser(buf)
mockLayer.EXPECT().Uncompressed().Return(rc, nil)
secondLayerFiles := []string{
filepath.Join(root, ".wh.foobar"),
}
buf = new(bytes.Buffer)
tw = tar.NewWriter(buf)
f(secondLayerFiles, tw)
mockLayer2 := mockv1.NewMockLayer(ctrl)
mockLayer2.EXPECT().MediaType().Return(types.OCILayer, nil)
rc = ioutil.NopCloser(buf)
mockLayer2.EXPECT().Uncompressed().Return(rc, nil)
layers := []v1.Layer{
mockLayer,
mockLayer2,
}
expectedFiles = append(expectedFiles, secondLayerFiles...)
actualFiles, err := GetFSFromLayers(root, layers, opts...)
assertGetFSFromLayers(
t,
actualFiles,
expectedFiles,
err,
expectErr,
)
}
func Test_GetFSFromLayers_with_whiteouts_include_whiteout_disabled(t *testing.T) {
ctrl := gomock.NewController(t)
root, err := ioutil.TempDir("", "layers-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(root)
opts := []FSOpt{
// I'd rather use the real func (util.ExtractFile)
// but you have to be root to chown
ExtractFunc(fakeExtract),
}
expectErr := false
f := func(expectedFiles []string, tw *tar.Writer) {
for _, f := range expectedFiles {
f := strings.TrimPrefix(strings.TrimPrefix(f, root), "/")
hdr := &tar.Header{
Name: f,
Mode: 0644,
Size: int64(len("Hello world\n")),
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err := tw.Write([]byte("Hello world\n")); err != nil {
t.Fatal(err)
}
}
if err := tw.Close(); err != nil {
t.Fatal(err)
}
}
expectedFiles := []string{
filepath.Join(root, "foobar"),
}
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
f(expectedFiles, tw)
mockLayer := mockv1.NewMockLayer(ctrl)
mockLayer.EXPECT().MediaType().Return(types.OCILayer, nil)
rc := ioutil.NopCloser(buf)
mockLayer.EXPECT().Uncompressed().Return(rc, nil)
secondLayerFiles := []string{
filepath.Join(root, ".wh.foobar"),
}
buf = new(bytes.Buffer)
tw = tar.NewWriter(buf)
f(secondLayerFiles, tw)
mockLayer2 := mockv1.NewMockLayer(ctrl)
mockLayer2.EXPECT().MediaType().Return(types.OCILayer, nil)
rc = ioutil.NopCloser(buf)
mockLayer2.EXPECT().Uncompressed().Return(rc, nil)
layers := []v1.Layer{
mockLayer,
mockLayer2,
}
actualFiles, err := GetFSFromLayers(root, layers, opts...)
assertGetFSFromLayers(
t,
actualFiles,
expectedFiles,
err,
expectErr,
)
}
func Test_GetFSFromLayers(t *testing.T) {
ctrl := gomock.NewController(t)
root, err := ioutil.TempDir("", "layers-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(root)
opts := []FSOpt{
// I'd rather use the real func (util.ExtractFile)
// but you have to be root to chown
ExtractFunc(fakeExtract),
}
expectErr := false
expectedFiles := []string{
filepath.Join(root, "foobar"),
}
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, f := range expectedFiles {
f := strings.TrimPrefix(strings.TrimPrefix(f, root), "/")
hdr := &tar.Header{
Name: f,
Mode: 0644,
Size: int64(len("Hello world\n")),
}
if err := tw.WriteHeader(hdr); err != nil {
t.Fatal(err)
}
if _, err := tw.Write([]byte("Hello world\n")); err != nil {
t.Fatal(err)
}
}
if err := tw.Close(); err != nil {
t.Fatal(err)
}
mockLayer := mockv1.NewMockLayer(ctrl)
mockLayer.EXPECT().MediaType().Return(types.OCILayer, nil)
rc := ioutil.NopCloser(buf)
mockLayer.EXPECT().Uncompressed().Return(rc, nil)
layers := []v1.Layer{
mockLayer,
}
actualFiles, err := GetFSFromLayers(root, layers, opts...)
assertGetFSFromLayers(
t,
actualFiles,
expectedFiles,
err,
expectErr,
)
}
func assertGetFSFromLayers(
t *testing.T,
actualFiles []string,
expectedFiles []string,
err error,
expectErr bool,
) {
if !expectErr && err != nil {
t.Error(err)
t.FailNow()
} else if expectErr && err == nil {
t.Error("expected err to not be nil")
t.FailNow()
}
if len(actualFiles) != len(expectedFiles) {
t.Errorf("expected %s to equal %s", actualFiles, expectedFiles)
t.FailNow()
}
for i := range expectedFiles {
if actualFiles[i] != expectedFiles[i] {
t.Errorf("expected %s to equal %s", actualFiles[i], expectedFiles[i])
}
}
}
func TestUpdateWhitelist(t *testing.T) {
tests := []struct {
name string
whitelistVarRun bool
expected []WhitelistEntry
}{
{
name: "var/run whitelisted",
whitelistVarRun: true,
expected: []WhitelistEntry{
{
Path: "/kaniko",
PrefixMatchOnly: false,
},
{
Path: "/etc/mtab",
PrefixMatchOnly: false,
},
{
Path: "/var/run",
PrefixMatchOnly: false,
},
},
},
{
name: "var/run not whitelisted",
expected: []WhitelistEntry{
{
Path: "/kaniko",
PrefixMatchOnly: false,
},
{
Path: "/etc/mtab",
PrefixMatchOnly: false,
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
whitelist = initialWhitelist
defer func() { whitelist = initialWhitelist }()
UpdateWhitelist(tt.whitelistVarRun)
testutil.CheckDeepEqual(t, tt.expected, whitelist)
})
}
}

View File

@ -49,12 +49,12 @@ var (
func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (v1.Image, error) {
t := timing.Start("Retrieving Source Image")
defer timing.DefaultRun.Stop(t)
buildArgs := opts.BuildArgs
var metaArgsString []string
var buildArgs []string
for _, arg := range stage.MetaArgs {
metaArgsString = append(metaArgsString, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
buildArgs = append(buildArgs, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
}
buildArgs = append(buildArgs, metaArgsString...)
buildArgs = append(buildArgs, opts.BuildArgs...)
currentBaseName, err := ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false)
if err != nil {
return nil, err
@ -148,7 +148,10 @@ func remoteOptions(registryName string, opts *config.KanikoOptions) []remote.Opt
}
}
return []remote.Option{remote.WithTransport(tr), remote.WithAuthFromKeychain(creds.GetKeychain())}
// Determine the v1.Platform the executor is currently running on.
platform := currentPlatform()
return []remote.Option{remote.WithTransport(tr), remote.WithAuthFromKeychain(creds.GetKeychain()), remote.WithPlatform(platform)}
}
func cachedImage(opts *config.KanikoOptions, image string) (v1.Image, error) {

View File

@ -22,6 +22,7 @@ import (
"encoding/hex"
"io"
"os"
"runtime"
"strconv"
"sync"
"syscall"
@ -29,6 +30,8 @@ import (
"github.com/minio/highwayhash"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
// ConfigureLogging sets the logrus logging level and forces logs to be colorful (!)
@ -138,3 +141,11 @@ func SHA256(r io.Reader) (string, error) {
}
return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil
}
// currentPlatform returns the v1.Platform on which the code runs
func currentPlatform() v1.Platform {
return v1.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
}
}

View File

@ -30,15 +30,21 @@ if [[ ! -z "$4" ]]; then
cache=$4
fi
if [[ ! -e $HOME/.config/gcloud/application_default_credentials.json ]]; then
echo "Application Default Credentials do not exist. Run [gcloud auth application-default login] to configure them"
exit 1
if [[ $destination == *"gcr"* ]]; then
if [[ ! -e $HOME/.config/gcloud/application_default_credentials.json ]]; then
echo "Application Default Credentials do not exist. Run [gcloud auth application-default login] to configure them"
exit 1
fi
docker run \
-v "$HOME"/.config/gcloud:/root/.config/gcloud \
-v "$context":/workspace \
gcr.io/kaniko-project/executor:latest \
--dockerfile "${dockerfile}" --destination "${destination}" --context dir:///workspace/ \
--cache="${cache}"
else
docker run \
-v "$context":/workspace \
gcr.io/kaniko-project/executor:latest \
--dockerfile "${dockerfile}" --destination "${destination}" --context dir:///workspace/ \
--cache="${cache}"
fi
docker run \
-v "$HOME"/.config/gcloud:/root/.config/gcloud \
-v "$context":/workspace \
gcr.io/kaniko-project/executor:latest \
--dockerfile "${dockerfile}" --destination "${destination}" --context dir:///workspace/ \
--cache="${cache}"

View File

@ -48,10 +48,21 @@ const (
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
Role ACLRole
Entity ACLEntity
EntityID string
Role ACLRole
Domain string
Email string
ProjectTeam *ProjectTeam
}
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
ProjectNumber string
Team string
}
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
@ -77,7 +88,7 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
return a.bucketDelete(ctx, entity)
}
// Set sets the permission level for the given entity.
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
@ -117,7 +128,7 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
@ -140,12 +151,7 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
r := make([]ACLRule, len(acls.Items))
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
return toBucketACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
@ -167,15 +173,11 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
return req.Do()
})
if err != nil {
return err
}
return nil
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
@ -190,7 +192,7 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
if err != nil {
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
@ -225,9 +227,7 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
})
}
func (a *ACLHandle) configureCall(call interface {
Header() http.Header
}, ctx context.Context) {
func (a *ACLHandle) configureCall(call interface{ Header() http.Header }, ctx context.Context) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
@ -236,10 +236,100 @@ func (a *ACLHandle) configureCall(call interface {
setClientHeader(call.Header())
}
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
r := make([]ACLRule, 0, len(items))
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
}
return r
}
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}

View File

@ -82,6 +82,12 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
}
req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
@ -188,6 +194,12 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
if err != nil {
return nil, err
}
if uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
// TODO(jba): retry iff metagen is set?
rb, err := req.Context(ctx).Do()
if err != nil {
@ -223,6 +235,20 @@ type BucketAttrs struct {
// apply to new objects when no object ACL is provided.
DefaultObjectACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// It should be set only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedDefaultObjectACL string
// Location is the location of the bucket. It defaults to "US".
Location string
@ -272,6 +298,12 @@ type BucketAttrs struct {
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
// The logging configuration.
Logging *BucketLogging
// The website configuration.
Website *BucketWebsite
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
@ -389,6 +421,34 @@ type LifecycleCondition struct {
NumNewerVersions int64
}
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
// The destination bucket where the current bucket's logs
// should be placed.
LogBucket string
// A prefix for log object names.
LogObjectPrefix string
}
// Website holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
// If the requested object path is missing, the service will ensure the path has
// a trailing '/', append this suffix, and attempt to retrieve the resulting
// object. This allows the creation of index.html objects to represent directory
// pages.
MainPageSuffix string
// If the requested object path is missing, and any mainPageSuffix object is
// missing, if applicable, the service will return the named object from this
// bucket as the content for a 404 Not Found result.
NotFoundPage string
}
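A minimal sketch (assumed, not part of this diff) of how a caller might set the new logging and website configuration when creating a bucket; the bucket, project, and log-bucket names are hypothetical:
attrs := &storage.BucketAttrs{
	Logging: &storage.BucketLogging{LogBucket: "example-log-bucket"},
	Website: &storage.BucketWebsite{
		MainPageSuffix: "index.html",
		NotFoundPage:   "404.html",
	},
}
if err := client.Bucket("example-bucket").Create(ctx, "example-project", attrs); err != nil {
	// TODO: Handle error.
}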
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@ -397,52 +457,28 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if err != nil {
return nil, err
}
bucket := &BucketAttrs{
return &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.ACL = acl
objACL := make([]ACLRule, len(b.DefaultObjectAcl))
for i, rule := range b.DefaultObjectAcl {
objACL[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
bucket.DefaultObjectACL = objACL
return bucket, nil
Logging: toBucketLogging(b.Logging),
Website: toBucketWebsite(b.Website),
}, nil
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
var acl []*raw.BucketAccessControl
if len(b.ACL) > 0 {
acl = make([]*raw.BucketAccessControl, len(b.ACL))
for i, rule := range b.ACL {
acl[i] = &raw.BucketAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
dACL := toRawObjectACL(b.DefaultObjectACL)
// Copy label map.
var labels map[string]string
if len(b.Labels) > 0 {
@ -464,10 +500,10 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
}
return &raw.Bucket{
Name: b.Name,
DefaultObjectAcl: dACL,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: acl,
Acl: toRawBucketACL(b.ACL),
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
Versioning: v,
Labels: labels,
Billing: bb,
@ -475,6 +511,8 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
Logging: b.Logging.toRawBucketLogging(),
Website: b.Website.toRawBucketWebsite(),
}
}
@ -536,6 +574,20 @@ type BucketAttrsToUpdate struct {
// If set, replaces the lifecycle configuration of the bucket.
Lifecycle *Lifecycle
// If set, replaces the logging configuration of the bucket.
Logging *BucketLogging
// If set, replaces the website configuration of the bucket.
Website *BucketWebsite
// If not empty, applies a predefined set of access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
setLabels map[string]string
deleteLabels map[string]bool
}
@ -595,6 +647,32 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
if ua.Lifecycle != nil {
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
}
if ua.Logging != nil {
if *ua.Logging == (BucketLogging{}) {
rb.NullFields = append(rb.NullFields, "Logging")
rb.Logging = nil
} else {
rb.Logging = ua.Logging.toRawBucketLogging()
}
}
if ua.Website != nil {
if *ua.Website == (BucketWebsite{}) {
rb.NullFields = append(rb.NullFields, "Website")
rb.Website = nil
} else {
rb.Website = ua.Website.toRawBucketWebsite()
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
}
if ua.PredefinedDefaultObjectACL != "" {
// Clear ACLs or the call will fail.
rb.DefaultObjectAcl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -836,6 +914,46 @@ func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
if b == nil {
return nil
}
return &raw.BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
if b == nil {
return nil
}
return &BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
if w == nil {
return nil
}
return &raw.BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
if w == nil {
return nil
}
return &BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {

View File

@ -115,6 +115,9 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
@ -209,6 +212,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}

View File

@ -22,9 +22,11 @@ https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
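For illustration only (not part of this diff), a caller can bound those retries by attaching a deadline to the context; the bucket and object names below are hypothetical:
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
client, err := storage.NewClient(ctx)
if err != nil {
	// TODO: Handle error.
}
defer client.Close()
// NewReader retries transient failures, but gives up once ctx is canceled or times out.
r, err := client.Bucket("example-bucket").Object("example-object").NewReader(ctx)
if err != nil {
	// TODO: Handle error.
}
defer r.Close()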
Creating a Client

View File

@ -24,6 +24,8 @@ func shouldRetry(err error) bool {
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
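As a hedged illustration (not part of this diff), the new case retries any error value that exposes a Temporary method, for example a custom error such as the hypothetical type below:
// tempErr is a hypothetical error; shouldRetry would now retry it because it
// satisfies interface{ Temporary() bool } and reports itself as temporary.
type tempErr struct{ msg string }

func (e tempErr) Error() string   { return e.msg }
func (e tempErr) Temporary() bool { return true }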

View File

@ -34,6 +34,8 @@ func shouldRetry(err error) bool {
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}

View File

@ -467,6 +467,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
@ -501,6 +504,10 @@ type ObjectAttrsToUpdate struct {
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
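As a hedged sketch (not part of this diff), an update that applies a predefined ACL leaves the ACL field nil; the names below are hypothetical:
_, err := client.Bucket("example-bucket").Object("example-object").Update(ctx, storage.ObjectAttrsToUpdate{
	PredefinedACL: "publicRead",
})
if err != nil {
	// TODO: Handle error.
}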
// Delete deletes the single specified object.
@ -595,23 +602,8 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) {
return parsed, nil
}
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
var acl []*raw.ObjectAccessControl
if len(oldACL) > 0 {
acl = make([]*raw.ObjectAccessControl, len(oldACL))
for i, rule := range oldACL {
acl[i] = &raw.ObjectAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
return acl
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL)
return &raw.Object{
Bucket: bucket,
Name: o.Name,
@ -621,7 +613,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: acl,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
@ -649,6 +641,14 @@ type ObjectAttrs struct {
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
@ -751,13 +751,6 @@ func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
acl := make([]ACLRule, len(o.Acl))
for i, rule := range o.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
@ -774,7 +767,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ACL: acl,
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,

File diff suppressed because one or more lines are too long

View File

@ -125,6 +125,9 @@ func (w *Writer) open() error {
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err

View File

@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
const Number = "v19.1.0"
const Number = "v19.1.1"

View File

@ -119,7 +119,10 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
return false, err
}
if err := f.pt.updateHeaders(); err != nil {
if err := f.pt.initPollingMethod(); err != nil {
return false, err
}
if err := f.pt.updatePollingMethod(); err != nil {
return false, err
}
return f.pt.hasTerminated(), f.pt.pollingError()
@ -264,7 +267,7 @@ type pollingTracker interface {
// these methods can differ per tracker
// checks the response headers and status code to determine the polling mechanism
updateHeaders() error
updatePollingMethod() error
// checks the response for tracker-specific error conditions
checkForErrors() error
@ -274,6 +277,10 @@ type pollingTracker interface {
// methods common to all trackers
// initializes a tracker's polling URL and method, called for each iteration.
// these values can be overridden by each polling tracker as required.
initPollingMethod() error
// initializes the tracker's internal state, call this when the tracker is created
initializeState() error
@ -348,6 +355,10 @@ func (pt *pollingTrackerBase) initializeState() error {
case http.StatusOK:
if ps := pt.getProvisioningState(); ps != nil {
pt.State = *ps
if pt.hasFailed() {
pt.updateErrorFromResponse()
return pt.pollingError()
}
} else {
pt.State = operationSucceeded
}
@ -364,8 +375,9 @@ func (pt *pollingTrackerBase) initializeState() error {
default:
pt.State = operationFailed
pt.updateErrorFromResponse()
return pt.pollingError()
}
return nil
return pt.initPollingMethod()
}
func (pt pollingTrackerBase) getProvisioningState() *string {
@ -416,12 +428,14 @@ func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
} else {
// check response body for error content
pt.updateErrorFromResponse()
err = pt.pollingError()
}
return err
}
// attempts to unmarshal a ServiceError type from the response body.
// if that fails then make a best attempt at creating something meaningful.
// NOTE: this assumes that the async operation has failed.
func (pt *pollingTrackerBase) updateErrorFromResponse() {
var err error
if pt.resp.ContentLength != 0 {
@ -431,8 +445,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
b, err = ioutil.ReadAll(pt.resp.Body)
if err != nil {
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
goto Default
}
if err = json.Unmarshal(b, &re); err != nil {
@ -445,20 +458,29 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
goto Default
}
}
if re.ServiceError != nil {
// the unmarshaller will ensure re.ServiceError is non-nil
// even if there was no content unmarshalled so check the code.
if re.ServiceError.Code != "" {
pt.Err = re.ServiceError
return
}
}
Default:
se := &ServiceError{
Code: fmt.Sprintf("HTTP status code %v", pt.resp.StatusCode),
Message: pt.resp.Status,
Code: pt.pollingStatus(),
Message: "The async operation failed.",
}
if err != nil {
se.InnerError = make(map[string]interface{})
se.InnerError["unmarshalError"] = err.Error()
}
// stick the response body into the error object in hopes
// it contains something useful to help diagnose the failure.
if len(pt.rawBody) > 0 {
se.AdditionalInfo = []map[string]interface{}{
pt.rawBody,
}
}
pt.Err = se
}
@ -538,13 +560,33 @@ func (pt pollingTrackerBase) baseCheckForErrors() error {
return nil
}
// default initialization of polling URL/method. each verb tracker will update this as required.
func (pt *pollingTrackerBase) initPollingMethod() error {
if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
return err
} else if ao != "" {
pt.URI = ao
pt.Pm = PollingAsyncOperation
return nil
}
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
return err
} else if lh != "" {
pt.URI = lh
pt.Pm = PollingLocation
return nil
}
// it's ok if we didn't find a polling header, this will be handled elsewhere
return nil
}
// DELETE
type pollingTrackerDelete struct {
pollingTrackerBase
}
func (pt *pollingTrackerDelete) updateHeaders() error {
func (pt *pollingTrackerDelete) updatePollingMethod() error {
// for 201 the Location header is required
if pt.resp.StatusCode == http.StatusCreated {
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
@ -600,7 +642,7 @@ type pollingTrackerPatch struct {
pollingTrackerBase
}
func (pt *pollingTrackerPatch) updateHeaders() error {
func (pt *pollingTrackerPatch) updatePollingMethod() error {
// by default we can use the original URL for polling and final GET
if pt.URI == "" {
pt.URI = pt.resp.Request.URL.String()
@ -658,7 +700,7 @@ type pollingTrackerPost struct {
pollingTrackerBase
}
func (pt *pollingTrackerPost) updateHeaders() error {
func (pt *pollingTrackerPost) updatePollingMethod() error {
// 201 requires Location header
if pt.resp.StatusCode == http.StatusCreated {
if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
@ -714,7 +756,7 @@ type pollingTrackerPut struct {
pollingTrackerBase
}
func (pt *pollingTrackerPut) updateHeaders() error {
func (pt *pollingTrackerPut) updatePollingMethod() error {
// by default we can use the original URL for polling and final GET
if pt.URI == "" {
pt.URI = pt.resp.Request.URL.String()
@ -808,7 +850,7 @@ func createPollingTracker(resp *http.Response) (pollingTracker, error) {
// this initializes the polling header values, we do this during creation in case the
// initial response sends us invalid values; this way the API call will return a non-nil
// error (not doing this means the error shows up in Future.Done)
return pt, pt.updateHeaders()
return pt, pt.updatePollingMethod()
}
// gets the polling URL from the Azure-AsyncOperation header.

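The async.go refactor above renames updateHeaders to updatePollingMethod and adds initPollingMethod so the polling URL is derived once from the Azure-AsyncOperation or Location header (and failures surface during initializeState). The caller-facing surface is unchanged; a hedged sketch of driving a Future by hand with the Done method shown above (names are illustrative, real code usually uses the SDK's wait helpers):

package lro

import (
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// pollUntilDone polls a long-running operation until it terminates.
func pollUntilDone(fut *azure.Future, sender autorest.Sender) error {
	for {
		done, err := fut.Done(sender) // checks status and advances the polling state
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(10 * time.Second) // naive fixed delay between polls
	}
}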
View File

@ -234,7 +234,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
}
delayed := DelayWithRetryAfter(resp, r.Context().Done())
if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
return nil, r.Context().Err()
return resp, r.Context().Err()
}
// don't count a 429 against the number of attempts
// so that we continue to retry until it succeeds

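The retry fix above returns the last response instead of nil when the request context is cancelled, so callers can still inspect it. A minimal sketch of wiring the decorator into a request (the URL is a placeholder):

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "https://example.invalid/health", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Retry up to 3 times with backoff whenever the service answers 429 or 503.
	resp, err := autorest.SendWithSender(http.DefaultClient, req,
		autorest.DoRetryForStatusCodes(3, 2*time.Second,
			http.StatusTooManyRequests, http.StatusServiceUnavailable))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
}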
View File

@ -282,7 +282,7 @@ func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
}
fl.mu.Lock()
defer fl.mu.Unlock()
fmt.Fprintf(fl.logFile, b.String())
fmt.Fprint(fl.logFile, b.String())
fl.logFile.Sync()
}
@ -311,7 +311,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
}
fl.mu.Lock()
defer fl.mu.Unlock()
fmt.Fprintf(fl.logFile, b.String())
fmt.Fprint(fl.logFile, b.String())
fl.logFile.Sync()
}

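The switch from Fprintf to Fprint matters because the logged request/response dump is data, not a format string; any literal "%" in it would otherwise be parsed as a verb. A tiny illustration:

package main

import (
	"fmt"
	"os"
)

func main() {
	s := "discount: 50% off" // data that happens to contain a '%'
	fmt.Fprintf(os.Stdout, s) // '%' is treated as a format verb and the output is mangled
	fmt.Fprintln(os.Stdout)
	fmt.Fprint(os.Stdout, s) // printed verbatim, which is what a logger wants
	fmt.Fprintln(os.Stdout)
}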
View File

@ -20,7 +20,7 @@ import (
)
// Number contains the semantic version of this SDK.
const Number = "v10.15.0"
const Number = "v10.15.5"
var (
userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",

View File

@ -1,137 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
type fileFullEaInformation struct {
NextEntryOffset uint32
Flags uint8
NameLength uint8
ValueLength uint16
}
var (
fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})
errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
errEaNameTooLarge = errors.New("extended attribute name too large")
errEaValueTooLarge = errors.New("extended attribute value too large")
)
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
Name string
Value []byte
Flags uint8
}
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
var info fileFullEaInformation
err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
if err != nil {
err = errInvalidEaBuffer
return
}
nameOffset := fileFullEaInformationSize
nameLen := int(info.NameLength)
valueOffset := nameOffset + int(info.NameLength) + 1
valueLen := int(info.ValueLength)
nextOffset := int(info.NextEntryOffset)
if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
err = errInvalidEaBuffer
return
}
ea.Name = string(b[nameOffset : nameOffset+nameLen])
ea.Value = b[valueOffset : valueOffset+valueLen]
ea.Flags = info.Flags
if info.NextEntryOffset != 0 {
nb = b[info.NextEntryOffset:]
}
return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
for len(b) != 0 {
ea, nb, err := parseEa(b)
if err != nil {
return nil, err
}
eas = append(eas, ea)
b = nb
}
return
}
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
if int(uint8(len(ea.Name))) != len(ea.Name) {
return errEaNameTooLarge
}
if int(uint16(len(ea.Value))) != len(ea.Value) {
return errEaValueTooLarge
}
entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
withPadding := (entrySize + 3) &^ 3
nextOffset := uint32(0)
if !last {
nextOffset = withPadding
}
info := fileFullEaInformation{
NextEntryOffset: nextOffset,
Flags: ea.Flags,
NameLength: uint8(len(ea.Name)),
ValueLength: uint16(len(ea.Value)),
}
err := binary.Write(buf, binary.LittleEndian, &info)
if err != nil {
return err
}
_, err = buf.Write([]byte(ea.Name))
if err != nil {
return err
}
err = buf.WriteByte(0)
if err != nil {
return err
}
_, err = buf.Write(ea.Value)
if err != nil {
return err
}
_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
if err != nil {
return err
}
return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
var buf bytes.Buffer
for i := range eas {
last := false
if i == len(eas)-1 {
last = true
}
err := writeEa(&buf, &eas[i], last)
if err != nil {
return nil, err
}
}
return buf.Bytes(), nil
}

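eas.go encodes and decodes Windows extended attributes in the FILE_FULL_EA_INFORMATION layout. Since the full file appears above, a short round-trip sketch of its two exported functions (the attribute name and value are arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Encode a single extended attribute into an EA buffer...
	eas := []winio.ExtendedAttribute{{Name: "SAMPLE.EA", Value: []byte("some data")}}
	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}
	// ...and decode it back.
	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %q\n", decoded[0].Name, decoded[0].Value)
}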
View File

@ -16,6 +16,7 @@ import (
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
type atomicBool int32
@ -79,6 +80,7 @@ type win32File struct {
wg sync.WaitGroup
wgLock sync.RWMutex
closing atomicBool
socket bool
readDeadline deadlineHandler
writeDeadline deadlineHandler
}
@ -109,7 +111,13 @@ func makeWin32File(h syscall.Handle) (*win32File, error) {
}
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
return makeWin32File(h)
// If we return the result of makeWin32File directly, it can result in an
// interface-wrapped nil, rather than a nil interface value.
f, err := makeWin32File(h)
if err != nil {
return nil, err
}
return f, nil
}
// closeHandle closes the resources associated with a Win32 handle
@ -190,6 +198,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
if f.closing.isSet() {
err = ErrFileClosed
}
} else if err != nil && f.socket {
// err is from Win32. Query the overlapped structure to get the winsock error.
var bytes, flags uint32
err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
}
case <-timeout:
cancelIoEx(f.handle, &c.o)
@ -265,6 +277,10 @@ func (f *win32File) Flush() error {
return syscall.FlushFileBuffers(f.handle)
}
func (f *win32File) Fd() uintptr {
return uintptr(f.handle)
}
func (d *deadlineHandler) set(deadline time.Time) error {
d.setLock.Lock()
defer d.setLock.Unlock()

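The MakeOpenFile change above avoids the classic typed-nil pitfall: returning a nil *win32File through an interface-typed result produces a non-nil interface value. A self-contained illustration of the pattern (the types are placeholders, not winio's):

package main

import (
	"fmt"
	"io"
)

type file struct{}

func (f *file) Close() error { return nil }

// open always fails and returns a nil *file.
func open() (*file, error) { return nil, fmt.Errorf("open failed") }

// bad returns the concrete result directly; the nil *file is wrapped in a
// non-nil io.Closer, so a caller's nil check silently passes.
func bad() (io.Closer, error) { return open() }

// good mirrors the fix above: check the error, then return an explicit nil.
func good() (io.Closer, error) {
	f, err := open()
	if err != nil {
		return nil, err
	}
	return f, nil
}

func main() {
	c, _ := bad()
	fmt.Println(c == nil) // false: the interface holds (*file)(nil)
	c, _ = good()
	fmt.Println(c == nil) // true
}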
View File

@ -20,7 +20,8 @@ const (
// FileBasicInfo contains file access time and file attributes information.
type FileBasicInfo struct {
CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
FileAttributes uintptr // includes padding
FileAttributes uint32
pad uint32 // padding
}
// GetFileBasicInfo retrieves times and attributes for a file.

9
vendor/github.com/Microsoft/go-winio/go.mod generated vendored Normal file
View File

@ -0,0 +1,9 @@
module github.com/Microsoft/go-winio
go 1.12
require (
github.com/pkg/errors v0.8.1
github.com/sirupsen/logrus v1.4.1
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
)

16
vendor/github.com/Microsoft/go-winio/go.sum generated vendored Normal file
View File

@ -0,0 +1,16 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

305
vendor/github.com/Microsoft/go-winio/hvsock.go generated vendored Normal file
View File

@ -0,0 +1,305 @@
package winio
import (
"fmt"
"io"
"net"
"os"
"syscall"
"time"
"unsafe"
"github.com/Microsoft/go-winio/pkg/guid"
)
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
const (
afHvSock = 34 // AF_HYPERV
socketError = ^uintptr(0)
)
// An HvsockAddr is an address for an AF_HYPERV socket.
type HvsockAddr struct {
VMID guid.GUID
ServiceID guid.GUID
}
type rawHvsockAddr struct {
Family uint16
_ uint16
VMID guid.GUID
ServiceID guid.GUID
}
// Network returns the address's network name, "hvsock".
func (addr *HvsockAddr) Network() string {
return "hvsock"
}
func (addr *HvsockAddr) String() string {
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID)
}
// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
g.Data1 = port
return g
}
func (addr *HvsockAddr) raw() rawHvsockAddr {
return rawHvsockAddr{
Family: afHvSock,
VMID: addr.VMID,
ServiceID: addr.ServiceID,
}
}
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) {
addr.VMID = raw.VMID
addr.ServiceID = raw.ServiceID
}
// HvsockListener is a socket listener for the AF_HYPERV address family.
type HvsockListener struct {
sock *win32File
addr HvsockAddr
}
// HvsockConn is a connected socket of the AF_HYPERV address family.
type HvsockConn struct {
sock *win32File
local, remote HvsockAddr
}
func newHvSocket() (*win32File, error) {
fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
if err != nil {
return nil, os.NewSyscallError("socket", err)
}
f, err := makeWin32File(fd)
if err != nil {
syscall.Close(fd)
return nil, err
}
f.socket = true
return f, nil
}
// ListenHvsock listens for connections on the specified hvsock address.
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) {
l := &HvsockListener{addr: *addr}
sock, err := newHvSocket()
if err != nil {
return nil, l.opErr("listen", err)
}
sa := addr.raw()
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa)))
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("socket", err))
}
err = syscall.Listen(sock.handle, 16)
if err != nil {
return nil, l.opErr("listen", os.NewSyscallError("listen", err))
}
return &HvsockListener{sock: sock, addr: *addr}, nil
}
func (l *HvsockListener) opErr(op string, err error) error {
return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err}
}
// Addr returns the listener's network address.
func (l *HvsockListener) Addr() net.Addr {
return &l.addr
}
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
sock, err := newHvSocket()
if err != nil {
return nil, l.opErr("accept", err)
}
defer func() {
if sock != nil {
sock.Close()
}
}()
c, err := l.sock.prepareIo()
if err != nil {
return nil, l.opErr("accept", err)
}
defer l.sock.wg.Done()
// AcceptEx, per documentation, requires an extra 16 bytes per address.
const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
var addrbuf [addrlen * 2]byte
var bytes uint32
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
_, err = l.sock.asyncIo(c, nil, bytes, err)
if err != nil {
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
}
conn := &HvsockConn{
sock: sock,
}
conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
sock = nil
return conn, nil
}
// Close closes the listener, causing any pending Accept calls to fail.
func (l *HvsockListener) Close() error {
return l.sock.Close()
}
/* Need to finish ConnectEx handling
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
sock, err := newHvSocket()
if err != nil {
return nil, err
}
defer func() {
if sock != nil {
sock.Close()
}
}()
c, err := sock.prepareIo()
if err != nil {
return nil, err
}
defer sock.wg.Done()
var bytes uint32
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
_, err = sock.asyncIo(ctx, c, nil, bytes, err)
if err != nil {
return nil, err
}
conn := &HvsockConn{
sock: sock,
remote: *addr,
}
sock = nil
return conn, nil
}
*/
func (conn *HvsockConn) opErr(op string, err error) error {
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
}
func (conn *HvsockConn) Read(b []byte) (int, error) {
c, err := conn.sock.prepareIo()
if err != nil {
return 0, conn.opErr("read", err)
}
defer conn.sock.wg.Done()
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var flags, bytes uint32
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
if err != nil {
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsarecv", err)
}
return 0, conn.opErr("read", err)
} else if n == 0 {
err = io.EOF
}
return n, err
}
func (conn *HvsockConn) Write(b []byte) (int, error) {
t := 0
for len(b) != 0 {
n, err := conn.write(b)
if err != nil {
return t + n, err
}
t += n
b = b[n:]
}
return t, nil
}
func (conn *HvsockConn) write(b []byte) (int, error) {
c, err := conn.sock.prepareIo()
if err != nil {
return 0, conn.opErr("write", err)
}
defer conn.sock.wg.Done()
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
var bytes uint32
err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
if err != nil {
if _, ok := err.(syscall.Errno); ok {
err = os.NewSyscallError("wsasend", err)
}
return 0, conn.opErr("write", err)
}
return n, err
}
// Close closes the socket connection, failing any pending read or write calls.
func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}
func (conn *HvsockConn) shutdown(how int) error {
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
if err != nil {
return os.NewSyscallError("shutdown", err)
}
return nil
}
// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
return conn.opErr("close", err)
}
return nil
}
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
return conn.opErr("close", err)
}
return nil
}
// LocalAddr returns the local address of the connection.
func (conn *HvsockConn) LocalAddr() net.Addr {
return &conn.local
}
// RemoteAddr returns the remote address of the connection.
func (conn *HvsockConn) RemoteAddr() net.Addr {
return &conn.remote
}
// SetDeadline implements the net.Conn SetDeadline method.
func (conn *HvsockConn) SetDeadline(t time.Time) error {
conn.SetReadDeadline(t)
conn.SetWriteDeadline(t)
return nil
}
// SetReadDeadline implements the net.Conn SetReadDeadline method.
func (conn *HvsockConn) SetReadDeadline(t time.Time) error {
return conn.sock.SetReadDeadline(t)
}
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error {
return conn.sock.SetWriteDeadline(t)
}

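hvsock.go is a new file, and the exported pieces above (HvsockAddr, VsockServiceID, ListenHvsock) compose like any net.Listener. A hedged sketch of accepting AF_HYPERV connections on a vsock-style port (the port number is arbitrary):

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// VMID is left as the zero GUID here; ServiceID is derived from an
	// AF_VSOCK-style port via VsockServiceID as shown above.
	addr := &winio.HvsockAddr{
		VMID:      guid.GUID{},
		ServiceID: winio.VsockServiceID(5000),
	}
	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func() {
			defer conn.Close()
			conn.Write([]byte("hello from the host\n"))
		}()
	}
}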
View File

@ -3,10 +3,13 @@
package winio
import (
"context"
"errors"
"fmt"
"io"
"net"
"os"
"runtime"
"syscall"
"time"
"unsafe"
@ -18,6 +21,48 @@ import (
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
type ioStatusBlock struct {
Status, Information uintptr
}
type objectAttributes struct {
Length uintptr
RootDirectory uintptr
ObjectName *unicodeString
Attributes uintptr
SecurityDescriptor *securityDescriptor
SecurityQoS uintptr
}
type unicodeString struct {
Length uint16
MaximumLength uint16
Buffer uintptr
}
type securityDescriptor struct {
Revision byte
Sbz1 byte
Control uint16
Owner uintptr
Group uintptr
Sacl uintptr
Dacl uintptr
}
type ntstatus int32
func (status ntstatus) Err() error {
if status >= 0 {
return nil
}
return rtlNtStatusToDosError(status)
}
const (
cERROR_PIPE_BUSY = syscall.Errno(231)
@ -25,21 +70,20 @@ const (
cERROR_PIPE_CONNECTED = syscall.Errno(535)
cERROR_SEM_TIMEOUT = syscall.Errno(121)
cPIPE_ACCESS_DUPLEX = 0x3
cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_REJECT_REMOTE_CLIENTS = 0x8
cPIPE_UNLIMITED_INSTANCES = 255
cNMPWAIT_USE_DEFAULT_WAIT = 0
cNMPWAIT_NOWAIT = 1
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
cPIPE_TYPE_MESSAGE = 4
cPIPE_READMODE_MESSAGE = 2
cFILE_OPEN = 1
cFILE_CREATE = 2
cFILE_PIPE_MESSAGE_TYPE = 1
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
cSE_DACL_PRESENT = 4
)
var (
@ -137,9 +181,30 @@ func (s pipeAddress) String() string {
return string(s)
}
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
for {
select {
case <-ctx.Done():
return syscall.Handle(0), ctx.Err()
default:
h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err == nil {
return h, nil
}
if err != cERROR_PIPE_BUSY {
return h, &os.PathError{Err: err, Op: "open", Path: *path}
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
}
}
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 5 seconds. (We do not use WaitNamedPipe.)
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
var absTimeout time.Time
if timeout != nil {
@ -147,23 +212,22 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
} else {
absTimeout = time.Now().Add(time.Second * 2)
}
ctx, _ := context.WithDeadline(context.Background(), absTimeout)
conn, err := DialPipeContext(ctx, path)
if err == context.DeadlineExceeded {
return nil, ErrTimeout
}
return conn, err
}
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
// cancellation or timeout.
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
var err error
var h syscall.Handle
for {
h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != cERROR_PIPE_BUSY {
break
}
if time.Now().After(absTimeout) {
return nil, ErrTimeout
}
// Wait 10 msec and try again. This is a rather simplistic
// view, as we always try each 10 milliseconds.
time.Sleep(time.Millisecond * 10)
}
h, err = tryDialPipe(ctx, &path)
if err != nil {
return nil, &os.PathError{Op: "open", Path: path, Err: err}
return nil, err
}
var flags uint32
@ -194,43 +258,87 @@ type acceptResponse struct {
}
type win32PipeListener struct {
firstHandle syscall.Handle
path string
securityDescriptor []byte
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
firstHandle syscall.Handle
path string
config PipeConfig
acceptCh chan (chan acceptResponse)
closeCh chan int
doneCh chan int
}
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
if first {
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
}
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
if c.MessageMode {
mode |= cPIPE_TYPE_MESSAGE
}
sa := &syscall.SecurityAttributes{}
sa.Length = uint32(unsafe.Sizeof(*sa))
if securityDescriptor != nil {
len := uint32(len(securityDescriptor))
sa.SecurityDescriptor = localAlloc(0, len)
defer localFree(sa.SecurityDescriptor)
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor)
}
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa)
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
path16, err := syscall.UTF16FromString(path)
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
var oa objectAttributes
oa.Length = unsafe.Sizeof(oa)
var ntPath unicodeString
if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
defer localFree(ntPath.Buffer)
oa.ObjectName = &ntPath
// The security descriptor is only needed for the first pipe.
if first {
if sd != nil {
len := uint32(len(sd))
sdb := localAlloc(0, len)
defer localFree(sdb)
copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
} else {
// Construct the default named pipe security descriptor.
var dacl uintptr
if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
}
defer localFree(dacl)
sdb := &securityDescriptor{
Revision: 1,
Control: cSE_DACL_PRESENT,
Dacl: dacl,
}
oa.SecurityDescriptor = sdb
}
}
typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
if c.MessageMode {
typ |= cFILE_PIPE_MESSAGE_TYPE
}
disposition := uint32(cFILE_OPEN)
access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
if first {
disposition = cFILE_CREATE
// By not asking for read or write access, the named pipe file system
// will put this pipe into an initially disconnected state, blocking
// client connections until the next call with first == false.
access = syscall.SYNCHRONIZE
}
timeout := int64(-50 * 10000) // 50ms
var (
h syscall.Handle
iosb ioStatusBlock
)
err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
if err != nil {
return 0, &os.PathError{Op: "open", Path: path, Err: err}
}
runtime.KeepAlive(ntPath)
return h, nil
}
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
if err != nil {
return nil, err
}
@ -341,32 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
if err != nil {
return nil, err
}
// Create a client handle and connect it. This results in the pipe
// instance always existing, so that clients see ERROR_PIPE_BUSY
// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
// up so that no other instances can be used. This would have been
// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
// instead of CreateNamedPipe. (Apparently created named pipes are
// considered to be in listening state regardless of whether any
// active calls to ConnectNamedPipe are outstanding.)
h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
if err != nil {
syscall.Close(h)
return nil, err
}
// Close the client handle. The server side of the instance will
// still be busy, leading to ERROR_PIPE_BUSY instead of
// ERROR_NOT_FOUND, as long as we don't close the server handle,
// or disconnect the client with DisconnectNamedPipe.
syscall.Close(h2)
l := &win32PipeListener{
firstHandle: h,
path: path,
securityDescriptor: sd,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
firstHandle: h,
path: path,
config: *c,
acceptCh: make(chan (chan acceptResponse)),
closeCh: make(chan int),
doneCh: make(chan int),
}
go l.listenerRoutine()
return l, nil

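The pipe.go rewrite above moves server pipe creation to NtCreateNamedPipeFile and makes dialing context-aware via DialPipeContext, but the public entry points keep their shapes. A minimal client/server sketch against the functions shown above (the pipe name is a placeholder):

package main

import (
	"bufio"
	"fmt"
	"log"
	"time"

	"github.com/Microsoft/go-winio"
)

const pipePath = `\\.\pipe\kaniko-example`

func main() {
	// Server side: with no explicit security descriptor, the new code path
	// builds the default named pipe ACL via rtlDefaultNpAcl.
	l, err := winio.ListenPipe(pipePath, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	go func() {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
		fmt.Fprintln(conn, "hello over the pipe")
	}()

	// Client side: a nil timeout would fall back to the 2 second default noted above.
	timeout := 5 * time.Second
	conn, err := winio.DialPipe(pipePath, &timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	line, _ := bufio.NewReader(conn).ReadString('\n')
	fmt.Print(line)
}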
235
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go generated vendored Normal file
View File

@ -0,0 +1,235 @@
// Package guid provides a GUID type. The backing structure for a GUID is
// identical to that used by the golang.org/x/sys/windows GUID type.
// There are two main binary encodings used for a GUID, the big-endian encoding,
// and the Windows (mixed-endian) encoding. See here for details:
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
package guid
import (
"crypto/rand"
"crypto/sha1"
"encoding"
"encoding/binary"
"fmt"
"strconv"
"golang.org/x/sys/windows"
)
// Variant specifies which GUID variant (or "type") of the GUID. It determines
// how the entirety of the rest of the GUID is interpreted.
type Variant uint8
// The variants specified by RFC 4122.
const (
// VariantUnknown specifies a GUID variant which does not conform to one of
// the variant encodings specified in RFC 4122.
VariantUnknown Variant = iota
VariantNCS
VariantRFC4122
VariantMicrosoft
VariantFuture
)
// Version specifies how the bits in the GUID were generated. For instance, a
// version 4 GUID is randomly generated, and a version 5 is generated from the
// hash of an input string.
type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})
// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
if _, err := rand.Read(b[:]); err != nil {
return GUID{}, err
}
g := FromArray(b)
g.setVersion(4) // Version 4 means randomly generated.
g.setVariant(VariantRFC4122)
return g, nil
}
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
// and the sample code treats it as a series of bytes, so we do the same here.
//
// Some implementations, such as those found on Windows, treat the name as a
// big-endian UTF16 stream of bytes. If that is desired, the string can be
// encoded as such before being passed to this function.
func NewV5(namespace GUID, name []byte) (GUID, error) {
b := sha1.New()
namespaceBytes := namespace.ToArray()
b.Write(namespaceBytes[:])
b.Write(name)
a := [16]byte{}
copy(a[:], b.Sum(nil))
g := FromArray(a)
g.setVersion(5) // Version 5 means generated from a string.
g.setVariant(VariantRFC4122)
return g, nil
}
func fromArray(b [16]byte, order binary.ByteOrder) GUID {
var g GUID
g.Data1 = order.Uint32(b[0:4])
g.Data2 = order.Uint16(b[4:6])
g.Data3 = order.Uint16(b[6:8])
copy(g.Data4[:], b[8:16])
return g
}
func (g GUID) toArray(order binary.ByteOrder) [16]byte {
b := [16]byte{}
order.PutUint32(b[0:4], g.Data1)
order.PutUint16(b[4:6], g.Data2)
order.PutUint16(b[6:8], g.Data3)
copy(b[8:16], g.Data4[:])
return b
}
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
func FromArray(b [16]byte) GUID {
return fromArray(b, binary.BigEndian)
}
// ToArray returns an array of 16 bytes representing the GUID in big-endian
// encoding.
func (g GUID) ToArray() [16]byte {
return g.toArray(binary.BigEndian)
}
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
func FromWindowsArray(b [16]byte) GUID {
return fromArray(b, binary.LittleEndian)
}
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding.
func (g GUID) ToWindowsArray() [16]byte {
return g.toArray(binary.LittleEndian)
}
func (g GUID) String() string {
return fmt.Sprintf(
"%08x-%04x-%04x-%04x-%012x",
g.Data1,
g.Data2,
g.Data3,
g.Data4[:2],
g.Data4[2:])
}
// FromString parses a string containing a GUID and returns the GUID. The only
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
// format.
func FromString(s string) (GUID, error) {
if len(s) != 36 {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
var g GUID
data1, err := strconv.ParseUint(s[0:8], 16, 32)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data1 = uint32(data1)
data2, err := strconv.ParseUint(s[9:13], 16, 16)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data2 = uint16(data2)
data3, err := strconv.ParseUint(s[14:18], 16, 16)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data3 = uint16(data3)
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
v, err := strconv.ParseUint(s[x:x+2], 16, 8)
if err != nil {
return GUID{}, fmt.Errorf("invalid GUID %q", s)
}
g.Data4[i] = uint8(v)
}
return g, nil
}
func (g *GUID) setVariant(v Variant) {
d := g.Data4[0]
switch v {
case VariantNCS:
d = (d & 0x7f)
case VariantRFC4122:
d = (d & 0x3f) | 0x80
case VariantMicrosoft:
d = (d & 0x1f) | 0xc0
case VariantFuture:
d = (d & 0x0f) | 0xe0
case VariantUnknown:
fallthrough
default:
panic(fmt.Sprintf("invalid variant: %d", v))
}
g.Data4[0] = d
}
// Variant returns the GUID variant, as defined in RFC 4122.
func (g GUID) Variant() Variant {
b := g.Data4[0]
if b&0x80 == 0 {
return VariantNCS
} else if b&0xc0 == 0x80 {
return VariantRFC4122
} else if b&0xe0 == 0xc0 {
return VariantMicrosoft
} else if b&0xe0 == 0xe0 {
return VariantFuture
}
return VariantUnknown
}
func (g *GUID) setVersion(v Version) {
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
}
// Version returns the GUID version, as defined in RFC 4122.
func (g GUID) Version() Version {
return Version((g.Data3 & 0xF000) >> 12)
}
// MarshalText returns the textual representation of the GUID.
func (g GUID) MarshalText() ([]byte, error) {
return []byte(g.String()), nil
}
// UnmarshalText takes the textual representation of a GUID, and unmarshals it
// into this GUID.
func (g *GUID) UnmarshalText(text []byte) error {
g2, err := FromString(string(text))
if err != nil {
return err
}
*g = g2
return nil
}

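The new guid package above is self-contained; a short sketch of generating, stringifying, and re-parsing a GUID with the functions it defines:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// Random (version 4) GUID.
	g, err := guid.NewV4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g, g.Version(), g.Variant())

	// Parse the canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form back.
	parsed, err := guid.FromString(g.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed == g) // GUIDs are comparable value types
}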
View File

@ -1,3 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go

View File

@ -1,4 +1,4 @@
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
// Code generated by 'go generate'; DO NOT EDIT.
package winio
@ -38,19 +38,25 @@ func errnoErr(e syscall.Errno) error {
var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
procCreateFileW = modkernel32.NewProc("CreateFileW")
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile")
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U")
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl")
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
@ -69,6 +75,7 @@ var (
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW")
procBackupRead = modkernel32.NewProc("BackupRead")
procBackupWrite = modkernel32.NewProc("BackupWrite")
procbind = modws2_32.NewProc("bind")
)
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) {
@ -120,6 +127,24 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro
return
}
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
var _p0 uint32
if wait {
_p0 = 1
} else {
_p0 = 0
}
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0)
if r1 == 0 {
@ -176,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA
return
}
func waitNamedPipe(name string, timeout uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(name)
if err != nil {
return
}
return _waitNamedPipe(_p0, timeout)
}
func _waitNamedPipe(name *uint16, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
if r1 == 0 {
@ -227,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) {
return
}
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) {
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
status = ntstatus(r0)
return
}
func rtlNtStatusToDosError(status ntstatus) (winerr error) {
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0)
if r0 != 0 {
winerr = syscall.Errno(r0)
}
return
}
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0)
status = ntstatus(r0)
return
}
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) {
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0)
status = ntstatus(r0)
return
}
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) {
var _p0 *uint16
_p0, err = syscall.UTF16PtrFromString(accountName)
@ -518,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p
}
return
}
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socketError {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}

View File

@ -1,26 +0,0 @@
Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.

View File

@ -1,5 +0,0 @@
Gotty is a library written in Go that determines and reads termcap database
files to produce an interface for interacting with the capabilities of a
terminal.
See the godoc documentation or the source code for more information about
function usage.

View File

@ -1,3 +0,0 @@
gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
all:// TODO add more documentation, with function usage in a doc.go file.
all:// TODO add more testing/benchmarking with go test.

View File

@ -1,514 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gotty
// Boolean capabilities
var BoolAttr = [...]string{
"auto_left_margin", "bw",
"auto_right_margin", "am",
"no_esc_ctlc", "xsb",
"ceol_standout_glitch", "xhp",
"eat_newline_glitch", "xenl",
"erase_overstrike", "eo",
"generic_type", "gn",
"hard_copy", "hc",
"has_meta_key", "km",
"has_status_line", "hs",
"insert_null_glitch", "in",
"memory_above", "da",
"memory_below", "db",
"move_insert_mode", "mir",
"move_standout_mode", "msgr",
"over_strike", "os",
"status_line_esc_ok", "eslok",
"dest_tabs_magic_smso", "xt",
"tilde_glitch", "hz",
"transparent_underline", "ul",
"xon_xoff", "nxon",
"needs_xon_xoff", "nxon",
"prtr_silent", "mc5i",
"hard_cursor", "chts",
"non_rev_rmcup", "nrrmc",
"no_pad_char", "npc",
"non_dest_scroll_region", "ndscr",
"can_change", "ccc",
"back_color_erase", "bce",
"hue_lightness_saturation", "hls",
"col_addr_glitch", "xhpa",
"cr_cancels_micro_mode", "crxm",
"has_print_wheel", "daisy",
"row_addr_glitch", "xvpa",
"semi_auto_right_margin", "sam",
"cpi_changes_res", "cpix",
"lpi_changes_res", "lpix",
"backspaces_with_bs", "",
"crt_no_scrolling", "",
"no_correctly_working_cr", "",
"gnu_has_meta_key", "",
"linefeed_is_newline", "",
"has_hardware_tabs", "",
"return_does_clr_eol", "",
}
// Numerical capabilities
var NumAttr = [...]string{
"columns", "cols",
"init_tabs", "it",
"lines", "lines",
"lines_of_memory", "lm",
"magic_cookie_glitch", "xmc",
"padding_baud_rate", "pb",
"virtual_terminal", "vt",
"width_status_line", "wsl",
"num_labels", "nlab",
"label_height", "lh",
"label_width", "lw",
"max_attributes", "ma",
"maximum_windows", "wnum",
"max_colors", "colors",
"max_pairs", "pairs",
"no_color_video", "ncv",
"buffer_capacity", "bufsz",
"dot_vert_spacing", "spinv",
"dot_horz_spacing", "spinh",
"max_micro_address", "maddr",
"max_micro_jump", "mjump",
"micro_col_size", "mcs",
"micro_line_size", "mls",
"number_of_pins", "npins",
"output_res_char", "orc",
"output_res_line", "orl",
"output_res_horz_inch", "orhi",
"output_res_vert_inch", "orvi",
"print_rate", "cps",
"wide_char_size", "widcs",
"buttons", "btns",
"bit_image_entwining", "bitwin",
"bit_image_type", "bitype",
"magic_cookie_glitch_ul", "",
"carriage_return_delay", "",
"new_line_delay", "",
"backspace_delay", "",
"horizontal_tab_delay", "",
"number_of_function_keys", "",
}
// String capabilities
var StrAttr = [...]string{
"back_tab", "cbt",
"bell", "bel",
"carriage_return", "cr",
"change_scroll_region", "csr",
"clear_all_tabs", "tbc",
"clear_screen", "clear",
"clr_eol", "el",
"clr_eos", "ed",
"column_address", "hpa",
"command_character", "cmdch",
"cursor_address", "cup",
"cursor_down", "cud1",
"cursor_home", "home",
"cursor_invisible", "civis",
"cursor_left", "cub1",
"cursor_mem_address", "mrcup",
"cursor_normal", "cnorm",
"cursor_right", "cuf1",
"cursor_to_ll", "ll",
"cursor_up", "cuu1",
"cursor_visible", "cvvis",
"delete_character", "dch1",
"delete_line", "dl1",
"dis_status_line", "dsl",
"down_half_line", "hd",
"enter_alt_charset_mode", "smacs",
"enter_blink_mode", "blink",
"enter_bold_mode", "bold",
"enter_ca_mode", "smcup",
"enter_delete_mode", "smdc",
"enter_dim_mode", "dim",
"enter_insert_mode", "smir",
"enter_secure_mode", "invis",
"enter_protected_mode", "prot",
"enter_reverse_mode", "rev",
"enter_standout_mode", "smso",
"enter_underline_mode", "smul",
"erase_chars", "ech",
"exit_alt_charset_mode", "rmacs",
"exit_attribute_mode", "sgr0",
"exit_ca_mode", "rmcup",
"exit_delete_mode", "rmdc",
"exit_insert_mode", "rmir",
"exit_standout_mode", "rmso",
"exit_underline_mode", "rmul",
"flash_screen", "flash",
"form_feed", "ff",
"from_status_line", "fsl",
"init_1string", "is1",
"init_2string", "is2",
"init_3string", "is3",
"init_file", "if",
"insert_character", "ich1",
"insert_line", "il1",
"insert_padding", "ip",
"key_backspace", "kbs",
"key_catab", "ktbc",
"key_clear", "kclr",
"key_ctab", "kctab",
"key_dc", "kdch1",
"key_dl", "kdl1",
"key_down", "kcud1",
"key_eic", "krmir",
"key_eol", "kel",
"key_eos", "ked",
"key_f0", "kf0",
"key_f1", "kf1",
"key_f10", "kf10",
"key_f2", "kf2",
"key_f3", "kf3",
"key_f4", "kf4",
"key_f5", "kf5",
"key_f6", "kf6",
"key_f7", "kf7",
"key_f8", "kf8",
"key_f9", "kf9",
"key_home", "khome",
"key_ic", "kich1",
"key_il", "kil1",
"key_left", "kcub1",
"key_ll", "kll",
"key_npage", "knp",
"key_ppage", "kpp",
"key_right", "kcuf1",
"key_sf", "kind",
"key_sr", "kri",
"key_stab", "khts",
"key_up", "kcuu1",
"keypad_local", "rmkx",
"keypad_xmit", "smkx",
"lab_f0", "lf0",
"lab_f1", "lf1",
"lab_f10", "lf10",
"lab_f2", "lf2",
"lab_f3", "lf3",
"lab_f4", "lf4",
"lab_f5", "lf5",
"lab_f6", "lf6",
"lab_f7", "lf7",
"lab_f8", "lf8",
"lab_f9", "lf9",
"meta_off", "rmm",
"meta_on", "smm",
"newline", "_glitch",
"pad_char", "npc",
"parm_dch", "dch",
"parm_delete_line", "dl",
"parm_down_cursor", "cud",
"parm_ich", "ich",
"parm_index", "indn",
"parm_insert_line", "il",
"parm_left_cursor", "cub",
"parm_right_cursor", "cuf",
"parm_rindex", "rin",
"parm_up_cursor", "cuu",
"pkey_key", "pfkey",
"pkey_local", "pfloc",
"pkey_xmit", "pfx",
"print_screen", "mc0",
"prtr_off", "mc4",
"prtr_on", "mc5",
"repeat_char", "rep",
"reset_1string", "rs1",
"reset_2string", "rs2",
"reset_3string", "rs3",
"reset_file", "rf",
"restore_cursor", "rc",
"row_address", "mvpa",
"save_cursor", "row_address",
"scroll_forward", "ind",
"scroll_reverse", "ri",
"set_attributes", "sgr",
"set_tab", "hts",
"set_window", "wind",
"tab", "s_magic_smso",
"to_status_line", "tsl",
"underline_char", "uc",
"up_half_line", "hu",
"init_prog", "iprog",
"key_a1", "ka1",
"key_a3", "ka3",
"key_b2", "kb2",
"key_c1", "kc1",
"key_c3", "kc3",
"prtr_non", "mc5p",
"char_padding", "rmp",
"acs_chars", "acsc",
"plab_norm", "pln",
"key_btab", "kcbt",
"enter_xon_mode", "smxon",
"exit_xon_mode", "rmxon",
"enter_am_mode", "smam",
"exit_am_mode", "rmam",
"xon_character", "xonc",
"xoff_character", "xoffc",
"ena_acs", "enacs",
"label_on", "smln",
"label_off", "rmln",
"key_beg", "kbeg",
"key_cancel", "kcan",
"key_close", "kclo",
"key_command", "kcmd",
"key_copy", "kcpy",
"key_create", "kcrt",
"key_end", "kend",
"key_enter", "kent",
"key_exit", "kext",
"key_find", "kfnd",
"key_help", "khlp",
"key_mark", "kmrk",
"key_message", "kmsg",
"key_move", "kmov",
"key_next", "knxt",
"key_open", "kopn",
"key_options", "kopt",
"key_previous", "kprv",
"key_print", "kprt",
"key_redo", "krdo",
"key_reference", "kref",
"key_refresh", "krfr",
"key_replace", "krpl",
"key_restart", "krst",
"key_resume", "kres",
"key_save", "ksav",
"key_suspend", "kspd",
"key_undo", "kund",
"key_sbeg", "kBEG",
"key_scancel", "kCAN",
"key_scommand", "kCMD",
"key_scopy", "kCPY",
"key_screate", "kCRT",
"key_sdc", "kDC",
"key_sdl", "kDL",
"key_select", "kslt",
"key_send", "kEND",
"key_seol", "kEOL",
"key_sexit", "kEXT",
"key_sfind", "kFND",
"key_shelp", "kHLP",
"key_shome", "kHOM",
"key_sic", "kIC",
"key_sleft", "kLFT",
"key_smessage", "kMSG",
"key_smove", "kMOV",
"key_snext", "kNXT",
"key_soptions", "kOPT",
"key_sprevious", "kPRV",
"key_sprint", "kPRT",
"key_sredo", "kRDO",
"key_sreplace", "kRPL",
"key_sright", "kRIT",
"key_srsume", "kRES",
"key_ssave", "kSAV",
"key_ssuspend", "kSPD",
"key_sundo", "kUND",
"req_for_input", "rfi",
"key_f11", "kf11",
"key_f12", "kf12",
"key_f13", "kf13",
"key_f14", "kf14",
"key_f15", "kf15",
"key_f16", "kf16",
"key_f17", "kf17",
"key_f18", "kf18",
"key_f19", "kf19",
"key_f20", "kf20",
"key_f21", "kf21",
"key_f22", "kf22",
"key_f23", "kf23",
"key_f24", "kf24",
"key_f25", "kf25",
"key_f26", "kf26",
"key_f27", "kf27",
"key_f28", "kf28",
"key_f29", "kf29",
"key_f30", "kf30",
"key_f31", "kf31",
"key_f32", "kf32",
"key_f33", "kf33",
"key_f34", "kf34",
"key_f35", "kf35",
"key_f36", "kf36",
"key_f37", "kf37",
"key_f38", "kf38",
"key_f39", "kf39",
"key_f40", "kf40",
"key_f41", "kf41",
"key_f42", "kf42",
"key_f43", "kf43",
"key_f44", "kf44",
"key_f45", "kf45",
"key_f46", "kf46",
"key_f47", "kf47",
"key_f48", "kf48",
"key_f49", "kf49",
"key_f50", "kf50",
"key_f51", "kf51",
"key_f52", "kf52",
"key_f53", "kf53",
"key_f54", "kf54",
"key_f55", "kf55",
"key_f56", "kf56",
"key_f57", "kf57",
"key_f58", "kf58",
"key_f59", "kf59",
"key_f60", "kf60",
"key_f61", "kf61",
"key_f62", "kf62",
"key_f63", "kf63",
"clr_bol", "el1",
"clear_margins", "mgc",
"set_left_margin", "smgl",
"set_right_margin", "smgr",
"label_format", "fln",
"set_clock", "sclk",
"display_clock", "dclk",
"remove_clock", "rmclk",
"create_window", "cwin",
"goto_window", "wingo",
"hangup", "hup",
"dial_phone", "dial",
"quick_dial", "qdial",
"tone", "tone",
"pulse", "pulse",
"flash_hook", "hook",
"fixed_pause", "pause",
"wait_tone", "wait",
"user0", "u0",
"user1", "u1",
"user2", "u2",
"user3", "u3",
"user4", "u4",
"user5", "u5",
"user6", "u6",
"user7", "u7",
"user8", "u8",
"user9", "u9",
"orig_pair", "op",
"orig_colors", "oc",
"initialize_color", "initc",
"initialize_pair", "initp",
"set_color_pair", "scp",
"set_foreground", "setf",
"set_background", "setb",
"change_char_pitch", "cpi",
"change_line_pitch", "lpi",
"change_res_horz", "chr",
"change_res_vert", "cvr",
"define_char", "defc",
"enter_doublewide_mode", "swidm",
"enter_draft_quality", "sdrfq",
"enter_italics_mode", "sitm",
"enter_leftward_mode", "slm",
"enter_micro_mode", "smicm",
"enter_near_letter_quality", "snlq",
"enter_normal_quality", "snrmq",
"enter_shadow_mode", "sshm",
"enter_subscript_mode", "ssubm",
"enter_superscript_mode", "ssupm",
"enter_upward_mode", "sum",
"exit_doublewide_mode", "rwidm",
"exit_italics_mode", "ritm",
"exit_leftward_mode", "rlm",
"exit_micro_mode", "rmicm",
"exit_shadow_mode", "rshm",
"exit_subscript_mode", "rsubm",
"exit_superscript_mode", "rsupm",
"exit_upward_mode", "rum",
"micro_column_address", "mhpa",
"micro_down", "mcud1",
"micro_left", "mcub1",
"micro_right", "mcuf1",
"micro_row_address", "mvpa",
"micro_up", "mcuu1",
"order_of_pins", "porder",
"parm_down_micro", "mcud",
"parm_left_micro", "mcub",
"parm_right_micro", "mcuf",
"parm_up_micro", "mcuu",
"select_char_set", "scs",
"set_bottom_margin", "smgb",
"set_bottom_margin_parm", "smgbp",
"set_left_margin_parm", "smglp",
"set_right_margin_parm", "smgrp",
"set_top_margin", "smgt",
"set_top_margin_parm", "smgtp",
"start_bit_image", "sbim",
"start_char_set_def", "scsd",
"stop_bit_image", "rbim",
"stop_char_set_def", "rcsd",
"subscript_characters", "subcs",
"superscript_characters", "supcs",
"these_cause_cr", "docr",
"zero_motion", "zerom",
"char_set_names", "csnm",
"key_mouse", "kmous",
"mouse_info", "minfo",
"req_mouse_pos", "reqmp",
"get_mouse", "getm",
"set_a_foreground", "setaf",
"set_a_background", "setab",
"pkey_plab", "pfxl",
"device_type", "devt",
"code_set_init", "csin",
"set0_des_seq", "s0ds",
"set1_des_seq", "s1ds",
"set2_des_seq", "s2ds",
"set3_des_seq", "s3ds",
"set_lr_margin", "smglr",
"set_tb_margin", "smgtb",
"bit_image_repeat", "birep",
"bit_image_newline", "binel",
"bit_image_carriage_return", "bicr",
"color_names", "colornm",
"define_bit_image_region", "defbi",
"end_bit_image_region", "endbi",
"set_color_band", "setcolor",
"set_page_length", "slines",
"display_pc_char", "dispc",
"enter_pc_charset_mode", "smpch",
"exit_pc_charset_mode", "rmpch",
"enter_scancode_mode", "smsc",
"exit_scancode_mode", "rmsc",
"pc_term_options", "pctrm",
"scancode_escape", "scesc",
"alt_scancode_esc", "scesa",
"enter_horizontal_hl_mode", "ehhlm",
"enter_left_hl_mode", "elhlm",
"enter_low_hl_mode", "elohlm",
"enter_right_hl_mode", "erhlm",
"enter_top_hl_mode", "ethlm",
"enter_vertical_hl_mode", "evhlm",
"set_a_attributes", "sgr1",
"set_pglen_inch", "slength",
"termcap_init2", "",
"termcap_reset", "",
"linefeed_if_not_lf", "",
"backspace_if_not_bs", "",
"other_non_function_keys", "",
"arrow_key_map", "",
"acs_ulcorner", "",
"acs_llcorner", "",
"acs_urcorner", "",
"acs_lrcorner", "",
"acs_ltee", "",
"acs_rtee", "",
"acs_btee", "",
"acs_ttee", "",
"acs_hline", "",
"acs_vline", "",
"acs_plus", "",
"memory_lock", "",
"memory_unlock", "",
"box_chars_1", "",
}

View File

@ -1,238 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Gotty is a Go-package for reading and parsing the terminfo database
package gotty
// TODO add more concurrency to name lookup, look for more opportunities.
import (
"encoding/binary"
"errors"
"fmt"
"os"
"reflect"
"strings"
"sync"
)
// Open a terminfo file by the name given and construct a TermInfo object.
// If something went wrong reading the terminfo database file, an error is
// returned.
func OpenTermInfo(termName string) (*TermInfo, error) {
var term *TermInfo
var err error
// Find the environment variables
termloc := os.Getenv("TERMINFO")
if len(termloc) == 0 {
// Search like ncurses
locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/",
"/lib/terminfo/", "/usr/share/terminfo/"}
var path string
for _, str := range locations {
// Construct path
path = str + string(termName[0]) + "/" + termName
// Check if path can be opened
file, _ := os.Open(path)
if file != nil {
// Path can open, fall out and use current path
file.Close()
break
}
}
if len(path) > 0 {
term, err = readTermInfo(path)
} else {
err = errors.New(fmt.Sprintf("No terminfo file(-location) found"))
}
}
return term, err
}
// Open a terminfo file from the environment variable containing the current
// terminal name and construct a TermInfo object. If something went wrong
// reading the terminfo database file, an error is returned.
func OpenTermInfoEnv() (*TermInfo, error) {
termenv := os.Getenv("TERM")
return OpenTermInfo(termenv)
}
// Return an attribute by the name attr provided. If none can be found,
// an error is returned.
func (term *TermInfo) GetAttribute(attr string) (stacker, error) {
// Channel to store the main value in.
var value stacker
// Add a blocking WaitGroup
var block sync.WaitGroup
// Keep track of variable being written.
written := false
// Function to put into goroutine.
f := func(ats interface{}) {
var ok bool
var v stacker
// Switch on type of map to use and assign value to it.
switch reflect.TypeOf(ats).Elem().Kind() {
case reflect.Bool:
v, ok = ats.(map[string]bool)[attr]
case reflect.Int16:
v, ok = ats.(map[string]int16)[attr]
case reflect.String:
v, ok = ats.(map[string]string)[attr]
}
// If ok, a value is found, so we can write.
if ok {
value = v
written = true
}
// Goroutine is done
block.Done()
}
block.Add(3)
// Go for all 3 attribute lists.
go f(term.boolAttributes)
go f(term.numAttributes)
go f(term.strAttributes)
// Wait until every goroutine is done.
block.Wait()
// If a value has been written, return it.
if written {
return value, nil
}
// Otherwise, error.
return nil, fmt.Errorf("Erorr finding attribute")
}
// Return an attribute by the name attr provided. If none can be found,
// an error is returned. A name is first converted to its termcap value.
func (term *TermInfo) GetAttributeName(name string) (stacker, error) {
tc := GetTermcapName(name)
return term.GetAttribute(tc)
}
// A utility function that finds and returns the termcap equivalent of a
// variable name.
func GetTermcapName(name string) string {
// Termcap name
var tc string
// Blocking group
var wait sync.WaitGroup
// Function to put into a goroutine
f := func(attrs []string) {
// Find the string corresponding to the name
for i, s := range attrs {
if s == name {
tc = attrs[i+1]
}
}
// Goroutine is finished
wait.Done()
}
wait.Add(3)
// Go for all 3 attribute lists
go f(BoolAttr[:])
go f(NumAttr[:])
go f(StrAttr[:])
// Wait until every goroutine is done
wait.Wait()
// Return the termcap name
return tc
}
// This function takes a path to a terminfo file and reads it in binary
// form to construct the actual TermInfo file.
func readTermInfo(path string) (*TermInfo, error) {
// Open the terminfo file
file, err := os.Open(path)
defer file.Close()
if err != nil {
return nil, err
}
// magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize
// Header is composed of the magic 0432 octal number, size of the name
// section, size of the boolean section, the amount of number values,
// the number of offsets of strings, and the size of the string section.
var header [6]int16
// Byte array is used to read in byte values
var byteArray []byte
// Short array is used to read in short values
var shArray []int16
// TermInfo object to store values
var term TermInfo
// Read in the header
err = binary.Read(file, binary.LittleEndian, &header)
if err != nil {
return nil, err
}
// If magic number isn't there or isn't correct, we have the wrong filetype
if header[0] != 0432 {
return nil, errors.New(fmt.Sprintf("Wrong filetype"))
}
// Read in the names
byteArray = make([]byte, header[1])
err = binary.Read(file, binary.LittleEndian, &byteArray)
if err != nil {
return nil, err
}
term.Names = strings.Split(string(byteArray), "|")
// Read in the booleans
byteArray = make([]byte, header[2])
err = binary.Read(file, binary.LittleEndian, &byteArray)
if err != nil {
return nil, err
}
term.boolAttributes = make(map[string]bool)
for i, b := range byteArray {
if b == 1 {
term.boolAttributes[BoolAttr[i*2+1]] = true
}
}
// If the number of bytes read is not even, a byte for alignment is added
if len(byteArray)%2 != 0 {
err = binary.Read(file, binary.LittleEndian, make([]byte, 1))
if err != nil {
return nil, err
}
}
// Read in shorts
shArray = make([]int16, header[3])
err = binary.Read(file, binary.LittleEndian, &shArray)
if err != nil {
return nil, err
}
term.numAttributes = make(map[string]int16)
for i, n := range shArray {
if n != 0377 && n > -1 {
term.numAttributes[NumAttr[i*2+1]] = n
}
}
// Read the offsets into the short array
shArray = make([]int16, header[4])
err = binary.Read(file, binary.LittleEndian, &shArray)
if err != nil {
return nil, err
}
// Read the actual strings in the byte array
byteArray = make([]byte, header[5])
err = binary.Read(file, binary.LittleEndian, &byteArray)
if err != nil {
return nil, err
}
term.strAttributes = make(map[string]string)
// We get an offset, and then iterate until the string is null-terminated
for i, offset := range shArray {
if offset > -1 {
r := offset
for ; byteArray[r] != 0; r++ {
}
term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r])
}
}
return &term, nil
}
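A minimal usage sketch for the reader above, assuming the package is imported under its upstream path github.com/Nvveen/Gotty (the path used in the vendor tree); the capability names are the short terminfo capnames stored in the attribute tables:

```go
package main

import (
	"fmt"
	"log"

	gotty "github.com/Nvveen/Gotty" // assumed upstream import path of the vendored package
)

func main() {
	// Open the terminfo entry named by $TERM.
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Numeric capability, looked up by its short terminfo name.
	if cols, err := ti.GetAttribute("cols"); err == nil {
		fmt.Println("columns:", cols)
	}
	// String capability, looked up by its long name via GetAttributeName.
	if bel, err := ti.GetAttributeName("bell"); err == nil {
		fmt.Printf("bell sequence: %q\n", bel)
	}
}
```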

View File

@ -1,362 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gotty
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
var exp = [...]string{
"%%",
"%c",
"%s",
"%p(\\d)",
"%P([A-z])",
"%g([A-z])",
"%'(.)'",
"%{([0-9]+)}",
"%l",
"%\\+|%-|%\\*|%/|%m",
"%&|%\\||%\\^",
"%=|%>|%<",
"%A|%O",
"%!|%~",
"%i",
"%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]",
"%\\?(.*?);",
}
var regex *regexp.Regexp
var staticVar map[byte]stacker
// Parses the attribute that is received with name attr and parameters params.
func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) {
// Get the attribute name first.
iface, err := term.GetAttribute(attr)
str, ok := iface.(string)
if err != nil {
return "", err
}
if !ok {
return str, errors.New("Only string capabilities can be parsed.")
}
// Construct the hidden parser struct so we can use a recursive stack based
// parser.
ps := &parser{}
// Dynamic variables only exist in this context.
ps.dynamicVar = make(map[byte]stacker, 26)
ps.parameters = make([]stacker, len(params))
// Convert the parameters to insert them into the parser struct.
for i, x := range params {
ps.parameters[i] = x
}
// Recursively walk and return.
result, err := ps.walk(str)
return result, err
}
// Parses the attribute that is received with name attr and parameters params.
// Only works on full name of a capability that is given, which it uses to
// search for the termcap name.
func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) {
tc := GetTermcapName(attr)
return term.Parse(tc, params...)
}
// Identify each token in a stack based manner and do the actual parsing.
func (ps *parser) walk(attr string) (string, error) {
// We use a buffer to get the modified string.
var buf bytes.Buffer
// Next, find and identify all tokens by their indices and strings.
tokens := regex.FindAllStringSubmatch(attr, -1)
if len(tokens) == 0 {
return attr, nil
}
indices := regex.FindAllStringIndex(attr, -1)
q := 0 // q counts the matches of one token
// Iterate through the string per character.
for i := 0; i < len(attr); i++ {
// If the current position is an identified token, execute the following
// steps.
if q < len(indices) && i >= indices[q][0] && i < indices[q][1] {
// Switch on token.
switch {
case tokens[q][0][:2] == "%%":
// Literal percentage character.
buf.WriteByte('%')
case tokens[q][0][:2] == "%c":
// Pop a character.
c, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
buf.WriteByte(c.(byte))
case tokens[q][0][:2] == "%s":
// Pop a string.
str, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
if _, ok := str.(string); !ok {
return buf.String(), errors.New("Stack head is not a string")
}
buf.WriteString(str.(string))
case tokens[q][0][:2] == "%p":
// Push a parameter on the stack.
index, err := strconv.ParseInt(tokens[q][1], 10, 8)
index--
if err != nil {
return buf.String(), err
}
if int(index) >= len(ps.parameters) {
return buf.String(), errors.New("Parameters index out of bound")
}
ps.st.push(ps.parameters[index])
case tokens[q][0][:2] == "%P":
// Pop a variable from the stack as a dynamic or static variable.
val, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
index := tokens[q][2]
if len(index) > 1 {
errorStr := fmt.Sprintf("%s is not a valid dynamic variables index",
index)
return buf.String(), errors.New(errorStr)
}
// Specify either dynamic or static.
if index[0] >= 'a' && index[0] <= 'z' {
ps.dynamicVar[index[0]] = val
} else if index[0] >= 'A' && index[0] <= 'Z' {
staticVar[index[0]] = val
}
case tokens[q][0][:2] == "%g":
// Push a variable from the stack as a dynamic or static variable.
index := tokens[q][3]
if len(index) > 1 {
errorStr := fmt.Sprintf("%s is not a valid static variables index",
index)
return buf.String(), errors.New(errorStr)
}
var val stacker
if index[0] >= 'a' && index[0] <= 'z' {
val = ps.dynamicVar[index[0]]
} else if index[0] >= 'A' && index[0] <= 'Z' {
val = staticVar[index[0]]
}
ps.st.push(val)
case tokens[q][0][:2] == "%'":
// Push a character constant.
con := tokens[q][4]
if len(con) > 1 {
errorStr := fmt.Sprintf("%s is not a valid character constant", con)
return buf.String(), errors.New(errorStr)
}
ps.st.push(con[0])
case tokens[q][0][:2] == "%{":
// Push an integer constant.
con, err := strconv.ParseInt(tokens[q][5], 10, 32)
if err != nil {
return buf.String(), err
}
ps.st.push(con)
case tokens[q][0][:2] == "%l":
// Push the length of the string that is popped from the stack.
popStr, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
if _, ok := popStr.(string); !ok {
errStr := fmt.Sprintf("Stack head is not a string")
return buf.String(), errors.New(errStr)
}
ps.st.push(len(popStr.(string)))
case tokens[q][0][:2] == "%?":
// If-then-else construct. First, the whole string is identified and
// then inside this substring, we can specify which parts to switch on.
ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);")
ifTokens := ifReg.FindStringSubmatch(tokens[q][0])
var (
ifStr string
err error
)
// Parse the if-part to determine if-else.
if len(ifTokens[1]) > 0 {
ifStr, err = ps.walk(ifTokens[1])
} else { // else
ifStr, err = ps.walk(ifTokens[4])
}
// Return any errors
if err != nil {
return buf.String(), err
} else if len(ifStr) > 0 {
// Self-imposed limitation; it is unclear whether this is correct, but
// printing from an if-clause did not appear to be intended.
return buf.String(), errors.New("If-clause cannot print statements")
}
var thenStr string
// Pop the first value that is set by parsing the if-clause.
choose, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
// Switch to if or else.
if choose.(int) == 0 && len(ifTokens[1]) > 0 {
thenStr, err = ps.walk(ifTokens[3])
} else if choose.(int) != 0 {
if len(ifTokens[1]) > 0 {
thenStr, err = ps.walk(ifTokens[2])
} else {
thenStr, err = ps.walk(ifTokens[5])
}
}
if err != nil {
return buf.String(), err
}
buf.WriteString(thenStr)
case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing
fallthrough
case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits.
fallthrough
case tokens[q][0][len(tokens[q][0])-1] == 'x':
fallthrough
case tokens[q][0][len(tokens[q][0])-1] == 'X':
fallthrough
case tokens[q][0][len(tokens[q][0])-1] == 's':
token := tokens[q][0]
// Remove the : that comes before a flag.
if token[1] == ':' {
token = token[:1] + token[2:]
}
digit, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
// The rest is determined like the normal formatted prints.
digitStr := fmt.Sprintf(token, digit.(int))
buf.WriteString(digitStr)
case tokens[q][0][:2] == "%i":
// Increment the parameters by one.
if len(ps.parameters) < 2 {
return buf.String(), errors.New("Not enough parameters to increment.")
}
val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int)
val1++
val2++
ps.parameters[0], ps.parameters[1] = val1, val2
default:
// The rest of the tokens is a special case, where two values are
// popped and then operated on by the token that comes after them.
op1, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
op2, err := ps.st.pop()
if err != nil {
return buf.String(), err
}
var result stacker
switch tokens[q][0][:2] {
case "%+":
// Addition
result = op2.(int) + op1.(int)
case "%-":
// Subtraction
result = op2.(int) - op1.(int)
case "%*":
// Multiplication
result = op2.(int) * op1.(int)
case "%/":
// Division
result = op2.(int) / op1.(int)
case "%m":
// Modulo
result = op2.(int) % op1.(int)
case "%&":
// Bitwise AND
result = op2.(int) & op1.(int)
case "%|":
// Bitwise OR
result = op2.(int) | op1.(int)
case "%^":
// Bitwise XOR
result = op2.(int) ^ op1.(int)
case "%=":
// Equals
result = op2 == op1
case "%>":
// Greater-than
result = op2.(int) > op1.(int)
case "%<":
// Lesser-than
result = op2.(int) < op1.(int)
case "%A":
// Logical AND
result = op2.(bool) && op1.(bool)
case "%O":
// Logical OR
result = op2.(bool) || op1.(bool)
case "%!":
// Logical complement
result = !op1.(bool)
case "%~":
// Bitwise complement
result = ^(op1.(int))
}
ps.st.push(result)
}
i = indices[q][1] - 1
q++
} else {
// We are not "inside" a token, so just skip until the end or the next
// token, and add all characters to the buffer.
j := i
if q != len(indices) {
for !(j >= indices[q][0] && j < indices[q][1]) {
j++
}
} else {
j = len(attr)
}
buf.WriteString(string(attr[i:j]))
i = j
}
}
// Return the buffer as a string.
return buf.String(), nil
}
// Push a stacker-value onto the stack.
func (st *stack) push(s stacker) {
*st = append(*st, s)
}
// Pop a stacker-value from the stack.
func (st *stack) pop() (stacker, error) {
if len(*st) == 0 {
return nil, errors.New("Stack is empty.")
}
newStack := make(stack, len(*st)-1)
val := (*st)[len(*st)-1]
copy(newStack, (*st)[:len(*st)-1])
*st = newStack
return val, nil
}
// Initialize regexes and the static vars (that don't get changed between
// calls).
func init() {
// Initialize the main regex.
expStr := strings.Join(exp[:], "|")
regex, _ = regexp.Compile(expStr)
// Initialize the static variables.
staticVar = make(map[byte]stacker, 26)
}
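Parse and ParseName expand parameterised string capabilities such as cursor addressing. A short sketch under the same import-path assumption as above; the capability name and parameter values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	gotty "github.com/Nvveen/Gotty" // assumed import path
)

func main() {
	ti, err := gotty.OpenTermInfoEnv()
	if err != nil {
		log.Fatal(err)
	}
	// "cup" (cursor_address) typically contains %i/%p1/%p2 stack operations;
	// Parse expands them into a concrete escape sequence for row 5, column 10.
	seq, err := ti.Parse("cup", 5, 10)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("move-cursor sequence: %q\n", seq)
}
```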

View File

@ -1,23 +0,0 @@
// Copyright 2012 Neal van Veen. All rights reserved.
// Usage of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package gotty
type TermInfo struct {
boolAttributes map[string]bool
numAttributes map[string]int16
strAttributes map[string]string
// The various names of the TermInfo file.
Names []string
}
type stacker interface {
}
type stack []stacker
type parser struct {
st stack
parameters []stacker
dynamicVar map[byte]stacker
}

View File

@ -1,18 +0,0 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
default: build
race:
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
test:
@go test -v -cover .
@go test -v ./cmd/bolt
.PHONY: fmt test

View File

@ -1,18 +0,0 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\boltdb\bolt
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -v -t ./...
build_script:
- go test -v ./...

View File

@ -1,252 +0,0 @@
package bolt
import (
"fmt"
"sort"
"unsafe"
)
// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
ids []pgid // all free and available free page ids.
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids.
}
// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
return &freelist{
pending: make(map[txid][]pgid),
cache: make(map[pgid]bool),
}
}
// size returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
func (f *freelist) count() int {
return f.free_count() + f.pending_count()
}
// free_count returns count of free pages
func (f *freelist) free_count() int {
return len(f.ids)
}
// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
var count int
for _, list := range f.pending {
count += len(list)
}
return count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
if len(f.ids) == 0 {
return 0
}
var initial, previd pgid
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
}
// Reset initial page if this is not contiguous.
if previd == 0 || id-previd != 1 {
initial = id
}
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == pgid(n) {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
copy(f.ids[i-n+1:], f.ids[i+1:])
f.ids = f.ids[:len(f.ids)-n]
}
// Remove from the free cache.
for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, initial+i)
}
return initial
}
previd = id
}
return 0
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
if p.id <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
}
// Free page and all its overflow pages.
var ids = f.pending[txid]
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
ids = append(ids, id)
f.cache[id] = true
}
f.pending[txid] = ids
}
// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
m := make(pgids, 0)
for tid, ids := range f.pending {
if tid <= txid {
// Move transaction's pending pages to the available freelist.
// Don't remove from the cache since the page is still free.
m = append(m, ids...)
delete(f.pending, tid)
}
}
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
// Remove page ids from cache.
for _, id := range f.pending[txid] {
delete(f.cache, id)
}
// Remove pages from pending list.
delete(f.pending, txid)
}
// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
}
// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
idx, count := 0, int(p.count)
if count == 0xFFFF {
idx = 1
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
}
// Copy the list of page ids from the freelist.
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
}
// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
}
// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
f.read(p)
// Build a cache of only pending pages.
pcache := make(map[pgid]bool)
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
pcache[pendingID] = true
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
var a []pgid
for _, id := range f.ids {
if !pcache[id] {
a = append(a, id)
}
}
f.ids = a
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
f.reindex()
}
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
f.cache[pendingID] = true
}
}
}
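The contiguous-run scan in allocate works only because the free ids are kept sorted: a run restarts whenever the current id is not exactly one greater than the previous id. A standalone sketch of that scan, using plain uint64 values instead of bolt's internal pgid type and without mutating the slice:

```go
package main

import "fmt"

// allocateRun returns the first id of a run of n consecutive ids in the
// sorted slice ids, or 0 if no such run exists. It mirrors the scan in
// freelist.allocate, minus the slice compaction and cache bookkeeping.
func allocateRun(ids []uint64, n int) uint64 {
	var initial, previd uint64
	for _, id := range ids {
		// Restart the run when this id is not contiguous with the previous one.
		if previd == 0 || id-previd != 1 {
			initial = id
		}
		if int(id-initial)+1 == n {
			return initial
		}
		previd = id
	}
	return 0
}

func main() {
	free := []uint64{3, 4, 7, 8, 9, 12}
	fmt.Println(allocateRun(free, 3)) // 7: pages 7, 8, 9 form the first run of three
	fmt.Println(allocateRun(free, 4)) // 0: no run of four consecutive pages
}
```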

View File

@ -18,9 +18,13 @@ package cio
import (
"context"
"errors"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"sync"
"github.com/containerd/containerd/defaults"
@ -140,6 +144,15 @@ func NewCreator(opts ...Opt) Creator {
if err != nil {
return nil, err
}
if streams.Stdin == nil {
fifos.Stdin = ""
}
if streams.Stdout == nil {
fifos.Stdout = ""
}
if streams.Stderr == nil {
fifos.Stderr = ""
}
return copyIO(fifos, streams)
}
}
@ -212,7 +225,90 @@ type DirectIO struct {
cio
}
var _ IO = &DirectIO{}
var (
_ IO = &DirectIO{}
_ IO = &logURI{}
)
// LogURI provides the raw logging URI
func LogURI(uri *url.URL) Creator {
return func(_ string) (IO, error) {
return &logURI{
config: Config{
Stdout: uri.String(),
Stderr: uri.String(),
},
}, nil
}
}
// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
func BinaryIO(binary string, args map[string]string) Creator {
return func(_ string) (IO, error) {
binary = filepath.Clean(binary)
if !strings.HasPrefix(binary, "/") {
return nil, errors.New("absolute path needed")
}
uri := &url.URL{
Scheme: "binary",
Path: binary,
}
q := uri.Query()
for k, v := range args {
q.Set(k, v)
}
uri.RawQuery = q.Encode()
res := uri.String()
return &logURI{
config: Config{
Stdout: res,
Stderr: res,
},
}, nil
}
}
// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
// If the log file already exists, the logs will be appended to the file.
func LogFile(path string) Creator {
return func(_ string) (IO, error) {
path = filepath.Clean(path)
if !strings.HasPrefix(path, "/") {
return nil, errors.New("absolute path needed")
}
uri := &url.URL{
Scheme: "file",
Path: path,
}
res := uri.String()
return &logURI{
config: Config{
Stdout: res,
Stderr: res,
},
}, nil
}
}
type logURI struct {
config Config
}
func (l *logURI) Config() Config {
return l.config
}
func (l *logURI) Cancel() {
}
func (l *logURI) Wait() {
}
func (l *logURI) Close() error {
return nil
}
// Load the io for a container but do not attach
//
@ -224,3 +320,7 @@ func Load(set *FIFOSet) (IO, error) {
closers: []io.Closer{set},
}, nil
}
func (p *pipes) closers() []io.Closer {
return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
}
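LogURI, BinaryIO, and LogFile each return a cio.Creator, the same type a client passes when starting a task, so redirecting task output becomes a one-line change at task creation. A hedged sketch of wiring LogFile into task creation; the socket path is the Linux default and the container ID "example" is hypothetical:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// "example" is a hypothetical container created elsewhere.
	container, err := client.LoadContainer(ctx, "example")
	if err != nil {
		log.Fatal(err)
	}

	// LogFile redirects the task's stdout and stderr to a file on disk
	// instead of attaching FIFOs to the calling process.
	task, err := container.NewTask(ctx, cio.LogFile("/var/log/example-task.log"))
	if err != nil {
		log.Fatal(err)
	}
	defer task.Delete(ctx)

	if err := task.Start(ctx); err != nil {
		log.Fatal(err)
	}
}
```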

View File

@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
}
var wg = &sync.WaitGroup{}
wg.Add(1)
go func() {
p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
if fifos.Stdout != "" {
wg.Add(1)
go func() {
p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
pipes.Stdout.Close()
wg.Done()
}()
io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
pipes.Stdout.Close()
wg.Done()
}()
}
if !fifos.Terminal {
if !fifos.Terminal && fifos.Stderr != "" {
wg.Add(1)
go func() {
p := bufPool.Get().(*[]byte)
@ -152,7 +154,3 @@ func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) {
},
}, err
}
func (p *pipes) closers() []io.Closer {
return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
}

View File

@ -17,10 +17,10 @@
package cio
import (
"context"
"fmt"
"io"
"net"
"sync"
winio "github.com/Microsoft/go-winio"
"github.com/containerd/containerd/log"
@ -31,17 +31,20 @@ const pipeRoot = `\\.\pipe`
// NewFIFOSetInDir returns a new set of fifos for the task
func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) {
stderrPipe := ""
if !terminal {
stderrPipe = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
}
return NewFIFOSet(Config{
Terminal: terminal,
Stdin: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
Stdout: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
Stderr: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
Stderr: stderrPipe,
}, nil), nil
}
func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
var (
wg sync.WaitGroup
set []io.Closer
)
@ -76,7 +79,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
if fifos.Stdout != "" {
l, err := winio.ListenPipe(fifos.Stdout, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdout)
return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
}
defer func(l net.Listener) {
if err != nil {
@ -85,9 +88,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
}(l)
set = append(set, l)
wg.Add(1)
go func() {
defer wg.Done()
c, err := l.Accept()
if err != nil {
log.L.WithError(err).Errorf("failed to accept stdout connection on %s", fifos.Stdout)
@ -103,7 +104,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
}()
}
if !fifos.Terminal && fifos.Stderr != "" {
if fifos.Stderr != "" {
l, err := winio.ListenPipe(fifos.Stderr, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
@ -115,9 +116,7 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
}(l)
set = append(set, l)
wg.Add(1)
go func() {
defer wg.Done()
c, err := l.Accept()
if err != nil {
log.L.WithError(err).Errorf("failed to accept stderr connection on %s", fifos.Stderr)
@ -150,3 +149,22 @@ func NewDirectIO(stdin io.WriteCloser, stdout, stderr io.ReadCloser, terminal bo
},
}
}
// NewDirectIOFromFIFOSet returns an IO implementation that exposes the IO streams as io.ReadCloser
// and io.WriteCloser.
func NewDirectIOFromFIFOSet(ctx context.Context, stdin io.WriteCloser, stdout, stderr io.ReadCloser, fifos *FIFOSet) *DirectIO {
_, cancel := context.WithCancel(ctx)
pipes := pipes{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
}
return &DirectIO{
pipes: pipes,
cio: cio{
config: fifos.Config,
closers: append(pipes.closers(), fifos),
cancel: cancel,
},
}
}

View File

@ -23,4 +23,10 @@ const (
// DefaultMaxSendMsgSize defines the default maximum message size for
// sending protobufs passed over the GRPC API.
DefaultMaxSendMsgSize = 16 << 20
// DefaultRuntimeNSLabel defines the namespace label to check for the
// default runtime
DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
// DefaultSnapshotterNSLabel defines the namespace label to check for the
// default snapshotter
DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
)

View File

@ -32,4 +32,6 @@ const (
// DefaultFIFODir is the default location used by client-side cio library
// to store FIFOs.
DefaultFIFODir = "/run/containerd/fifo"
// DefaultRuntime is the default linux runtime
DefaultRuntime = "io.containerd.runc.v2"
)

View File

@ -26,10 +26,10 @@ import (
var (
// DefaultRootDir is the default location used by containerd to store
// persistent data
DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
// DefaultStateDir is the default location used by containerd to store
// transient data
DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
)
const (
@ -40,4 +40,6 @@ const (
// DefaultFIFODir is the default location used by client-side cio library
// to store FIFOs. Unused on Windows.
DefaultFIFODir = ""
// DefaultRuntime is the default windows runtime
DefaultRuntime = "io.containerd.runhcs.v1"
)

View File

@ -0,0 +1,93 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and error.Wrapf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs
import (
"context"
"github.com/pkg/errors"
)
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping.
ErrInvalidArgument = errors.New("invalid argument")
ErrNotFound = errors.New("not found")
ErrAlreadyExists = errors.New("already exists")
ErrFailedPrecondition = errors.New("failed precondition")
ErrUnavailable = errors.New("unavailable")
ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
)
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errors.Cause(err) == ErrInvalidArgument
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errors.Cause(err) == ErrNotFound
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errors.Cause(err) == ErrAlreadyExists
}
// IsFailedPrecondition returns true if an operation could not proceed to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errors.Cause(err) == ErrFailedPrecondition
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errors.Cause(err) == ErrUnavailable
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errors.Cause(err) == ErrNotImplemented
}
// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
return errors.Cause(err) == context.Canceled
}
// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
return errors.Cause(err) == context.DeadlineExceeded
}
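The intended pattern, per the package comment, is to wrap one of these sentinel errors with context and later recover the class with the IsXXX helpers. A brief sketch; lookupImage is a hypothetical helper:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// lookupImage is a hypothetical helper that reports a missing image by
// wrapping the ErrNotFound sentinel with context.
func lookupImage(name string) error {
	return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
}

func main() {
	err := lookupImage("docker.io/library/busybox:latest")
	// errors.Cause unwraps the context added by Wrapf, so the class check
	// still succeeds on the wrapped error.
	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(err)                     // image "docker.io/library/busybox:latest": not found
}
```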

vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored, 147 lines)
View File

@ -0,0 +1,147 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errdefs
import (
"context"
"strings"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
if err == nil {
return nil
}
if isGRPCError(err) {
// error has already been mapped to grpc
return err
}
switch {
case IsInvalidArgument(err):
return status.Errorf(codes.InvalidArgument, err.Error())
case IsNotFound(err):
return status.Errorf(codes.NotFound, err.Error())
case IsAlreadyExists(err):
return status.Errorf(codes.AlreadyExists, err.Error())
case IsFailedPrecondition(err):
return status.Errorf(codes.FailedPrecondition, err.Error())
case IsUnavailable(err):
return status.Errorf(codes.Unavailable, err.Error())
case IsNotImplemented(err):
return status.Errorf(codes.Unimplemented, err.Error())
case IsCanceled(err):
return status.Errorf(codes.Canceled, err.Error())
case IsDeadlineExceeded(err):
return status.Errorf(codes.DeadlineExceeded, err.Error())
}
return err
}
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
return ToGRPC(errors.Wrapf(err, format, args...))
}
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
if err == nil {
return nil
}
var cls error // divide these into error classes, becomes the cause
switch code(err) {
case codes.InvalidArgument:
cls = ErrInvalidArgument
case codes.AlreadyExists:
cls = ErrAlreadyExists
case codes.NotFound:
cls = ErrNotFound
case codes.Unavailable:
cls = ErrUnavailable
case codes.FailedPrecondition:
cls = ErrFailedPrecondition
case codes.Unimplemented:
cls = ErrNotImplemented
case codes.Canceled:
cls = context.Canceled
case codes.DeadlineExceeded:
cls = context.DeadlineExceeded
default:
cls = ErrUnknown
}
msg := rebaseMessage(cls, err)
if msg != "" {
err = errors.Wrap(cls, msg)
} else {
err = errors.WithStack(cls)
}
return err
}
// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
desc := errDesc(err)
clss := cls.Error()
if desc == clss {
return ""
}
return strings.TrimSuffix(desc, ": "+clss)
}
func isGRPCError(err error) bool {
_, ok := status.FromError(err)
return ok
}
func code(err error) codes.Code {
if s, ok := status.FromError(err); ok {
return s.Code()
}
return codes.Unknown
}
func errDesc(err error) string {
if s, ok := status.FromError(err); ok {
return s.Message()
}
return err.Error()
}
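ToGRPC and FromGRPC are meant to be symmetric across a service boundary: the server maps an error class to a gRPC status code, and the client maps the status back onto the same sentinel, with rebaseMessage stripping the duplicated class suffix. A round-trip sketch with an illustrative error message:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Server side: a wrapped containerd error becomes a gRPC NotFound status.
	serverErr := errors.Wrap(errdefs.ErrNotFound, "container abc123")
	grpcErr := errdefs.ToGRPC(serverErr)

	// Client side: the status code is mapped back onto the sentinel class.
	clientErr := errdefs.FromGRPC(grpcErr)

	fmt.Println(errdefs.IsNotFound(clientErr)) // true
	fmt.Println(clientErr)                     // container abc123: not found
}
```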

View File

@ -30,7 +30,7 @@ var (
// messages.
G = GetLogger
// L is an alias for the the standard logger.
// L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger())
)
@ -42,6 +42,10 @@ type (
// and is usually used to trace detailed behavior of the program.
const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// ParseLevel takes a string level and returns the Logrus log level constant.
// It supports trace level.
func ParseLevel(lvl string) (logrus.Level, error) {

View File

@ -0,0 +1,229 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package platforms
import specs "github.com/opencontainers/image-spec/specs-go/v1"
// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
type MatchComparer interface {
Matcher
Less(specs.Platform, specs.Platform) bool
}
// Only returns a match comparer for a single platform
// using default resolution logic for the platform.
//
// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
// For ARMv7, will also match ARMv6 and ARMv5
// For ARMv6, will also match ARMv5
func Only(platform specs.Platform) MatchComparer {
platform = Normalize(platform)
if platform.Architecture == "arm" {
if platform.Variant == "v8" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v7",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v7" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v6",
},
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
if platform.Variant == "v6" {
return orderedPlatformComparer{
matchers: []Matcher{
&matcher{
Platform: platform,
},
&matcher{
Platform: specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v5",
},
},
},
}
}
}
return singlePlatformComparer{
Matcher: &matcher{
Platform: platform,
},
}
}
// Ordered returns a platform MatchComparer which matches any of the platforms
// but orders them in order they are provided.
func Ordered(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return orderedPlatformComparer{
matchers: matchers,
}
}
// Any returns a platform MatchComparer which matches any of the platforms
// with no preference for ordering.
func Any(platforms ...specs.Platform) MatchComparer {
matchers := make([]Matcher, len(platforms))
for i := range platforms {
matchers[i] = NewMatcher(platforms[i])
}
return anyPlatformComparer{
matchers: matchers,
}
}
// All is a platform MatchComparer which matches all platforms
// with preference for ordering.
var All MatchComparer = allPlatformComparer{}
type singlePlatformComparer struct {
Matcher
}
func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
return c.Match(p1) && !c.Match(p2)
}
type orderedPlatformComparer struct {
matchers []Matcher
}
func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
for _, m := range c.matchers {
p1m := m.Match(p1)
p2m := m.Match(p2)
if p1m && !p2m {
return true
}
if p1m || p2m {
return false
}
}
return false
}
type anyPlatformComparer struct {
matchers []Matcher
}
func (c anyPlatformComparer) Match(platform specs.Platform) bool {
for _, m := range c.matchers {
if m.Match(platform) {
return true
}
}
return false
}
func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
var p1m, p2m bool
for _, m := range c.matchers {
if !p1m && m.Match(p1) {
p1m = true
}
if !p2m && m.Match(p2) {
p2m = true
}
if p1m && p2m {
return false
}
}
// If one matches and the other does not, sort the match first
return p1m && !p2m
}
type allPlatformComparer struct{}
func (allPlatformComparer) Match(specs.Platform) bool {
return true
}
func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
return false
}
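Ordered and Any wrap several platform matchers into a single MatchComparer; Ordered additionally prefers earlier platforms when sorting. A small sketch of matching and ordering with two Linux architectures:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	amd64 := specs.Platform{OS: "linux", Architecture: "amd64"}
	arm64 := specs.Platform{OS: "linux", Architecture: "arm64"}
	windows := specs.Platform{OS: "windows", Architecture: "amd64"}

	// Ordered matches either Linux platform but prefers amd64 when sorting.
	m := platforms.Ordered(amd64, arm64)

	fmt.Println(m.Match(amd64), m.Match(arm64), m.Match(windows)) // true true false
	fmt.Println(m.Less(amd64, arm64))                             // true: amd64 sorts first
	fmt.Println(m.Less(arm64, amd64))                             // false
}
```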

Some files were not shown because too many files have changed in this diff.