commit 5dcc1706c0

@@ -1 +0,0 @@
vendor/

@@ -31,11 +31,6 @@ Describe any changes here so maintainer can include it in the release notes, or
```
Examples of user facing changes:
- Skaffold config changes like
  e.g. "Add buildArgs to `Kustomize` deployer skaffold config."
- Bug fixes
  e.g. "Improve skaffold init behaviour when tags are used in manifests"
- Any changes in skaffold behavior
  e.g. "Artiface cachine is turned on by default."
- kaniko adds a new flag `--registry-repo` to override registry

```

.travis.yml
@@ -1,10 +1,37 @@
language: go
os: linux

dist: bionic
env:
  global:
    - IMAGE_REPO=localhost:5000 REGISTRY=localhost:5000
go:
  - "1.13.3"
  - "1.14"
go_import_path: github.com/GoogleContainerTools/kaniko

script:
  - make test
  - make images
jobs:
  include:
    - name: unit-test
      script:
        - make test
    - name: integration-test-run
      before_install:
        - make travis-setup
      script:
        - make integration-test-run
    - name: integration-test-layers
      before_install:
        - make travis-setup
      script:
        - make integration-test-layers
    - name: build-image-and-k8s-integration-test
      before_install:
        - make travis-setup
        - make minikube-setup
      script:
        - make images
        - make push
        - make integration-test-k8s
    - name: integration-test-misc
      before_install:
        - make travis-setup
      script:
        - make integration-test-misc

CHANGELOG.md
@@ -1,3 +1,167 @@
# v0.19.0 Release - 2020-03-18
This is the 19th release of Kaniko!

In this release, the highlights are:
1. Cache layer size duplication regression in v0.18.0 is fixed. [#1138](https://github.com/GoogleContainerTools/kaniko/issues/1138)
1. Cache performance when using build-args. `build-args` are only part of cache key for a layer if it is used.
1. Kaniko can support a `tar.gz` context with `tar://` prefix.
1. Users can provide registry certificates for private registries.

# Bug Fixes
* Use the correct name for acr helper [#1121](https://github.com/GoogleContainerTools/kaniko/pull/1121)
* remove build args from composite key and replace all build args [#1085](https://github.com/GoogleContainerTools/kaniko/pull/1085)
* fix resolve link for dirs with trailing / [#1113](https://github.com/GoogleContainerTools/kaniko/pull/1113)

# New Features
* feat: add support of local '.tar.gz' file inside the kaniko container [#1115](https://github.com/GoogleContainerTools/kaniko/pull/1115)
* Add support to `--chown` flag to ADD command (Issue #57) [#1134](https://github.com/GoogleContainerTools/kaniko/pull/1134)
* executor: add --label flag [#1075](https://github.com/GoogleContainerTools/kaniko/pull/1075)
* Allow user to provide registry certificate [#1037](https://github.com/GoogleContainerTools/kaniko/pull/1037)

# Refactors And Updates
* Migrate to golang 1.14 [#1098](https://github.com/GoogleContainerTools/kaniko/pull/1098)
* Make cloudbuild.yaml re-usable for anyone [#1135](https://github.com/GoogleContainerTools/kaniko/pull/1135)
* fix: credential typo [#1128](https://github.com/GoogleContainerTools/kaniko/pull/1128)
* Travis k8s integration test [#1124](https://github.com/GoogleContainerTools/kaniko/pull/1124)
* Add more tests for Copy and some fixes. [#1114](https://github.com/GoogleContainerTools/kaniko/pull/1114)

# Documentation
* Update README on running in Docker [#1141](https://github.com/GoogleContainerTools/kaniko/pull/1141)

Huge thank you for this release towards our contributors:
- Anthony Davies
- Batuhan Apaydın
- Ben Einaudi
- Carlos Sanchez
- Cole Wippern
- cvgw
- Dani Raznikov
- DracoBlue
- James Ravn
- Jordan GOASDOUE
- Logan.Price
- Moritz Wanzenböck
- ohchang-kwon
- Or Sela
- Sam Stoelinga
- Tejal Desai
- Thomas Bonfort
- Thomas Strömberg
- tinkerborg
- Wietse Muizelaar
- xanonid
- Yoan Blanc
- Yuheng Zhang

# v0.18.0 Release - 2020-03-05
This release fixes all the regression bugs associated with v0.17.0 and v0.17.1.
This release, the team did a lot of work improving our test infrastructure, more tests cases
and refactored filesystem walking.

Thank you all for your patience and supporting us throughout!

# Bug Fixes
* fix home being reset to root [#1072](https://github.com/GoogleContainerTools/kaniko/pull/1072)
* fix user metadata set to USER:GROUP if group string is not set [#1105](https://github.com/GoogleContainerTools/kaniko/pull/1105)
* check for filepath.Walk error everywhere [#1086](https://github.com/GoogleContainerTools/kaniko/pull/1086)
* fix #1092 TestRelativePaths [#1093](https://github.com/GoogleContainerTools/kaniko/pull/1093)
* Resolve filepaths before scanning for changes [#1069](https://github.com/GoogleContainerTools/kaniko/pull/1069)
* Fix #1020 os.Chtimes invalid arg [#1074](https://github.com/GoogleContainerTools/kaniko/pull/1074)
* Fix #1067 - image no longer available [#1068](https://github.com/GoogleContainerTools/kaniko/pull/1068)
* Ensure image SHA stays consistent when layer contents haven't changed [#1032](https://github.com/GoogleContainerTools/kaniko/pull/1032)
* fix flake TestRun/Dockerfile_test_copy_symlink [#1030](https://github.com/GoogleContainerTools/kaniko/pull/1030)

# New Features
* root: add --registry-mirror flag [#836](https://github.com/GoogleContainerTools/kaniko/pull/836)
* set log format using a flag [#1031](https://github.com/GoogleContainerTools/kaniko/pull/1031)
* Do not recompute layers retrieved from cache [#882](https://github.com/GoogleContainerTools/kaniko/pull/882)
* More idiomatic logging config [#1040](https://github.com/GoogleContainerTools/kaniko/pull/1040)

# Test Refactors and Updates
* Split travis integration tests [#1090](https://github.com/GoogleContainerTools/kaniko/pull/1090)
* Add integration tests from Issues [#1054](https://github.com/GoogleContainerTools/kaniko/pull/1054)
* add integration tests with their own context [#1088](https://github.com/GoogleContainerTools/kaniko/pull/1088)
* Fixed typo in README.md [#1060](https://github.com/GoogleContainerTools/kaniko/pull/1060)
* test: refactor container-diff call [#1077](https://github.com/GoogleContainerTools/kaniko/pull/1077)
* Refactor integration image built [#1049](https://github.com/GoogleContainerTools/kaniko/pull/1049)
* separate travis into multiple jobs for parallelization [#1055](https://github.com/GoogleContainerTools/kaniko/pull/1055)
* refactor copy.chown code and add more tests [#1027](https://github.com/GoogleContainerTools/kaniko/pull/1027)
* Allow contributors to launch integration tests against local registry [#1014](https://github.com/GoogleContainerTools/kaniko/pull/1014)

# Documentation
* add design proposal template [#1046](https://github.com/GoogleContainerTools/kaniko/pull/1046)
* Update filesystem proposal status to Reviewed [#1066](https://github.com/GoogleContainerTools/kaniko/pull/1066)
* update instructions for running integration tests [#1034](https://github.com/GoogleContainerTools/kaniko/pull/1034)
* design proposal 01: filesystem resolution [#1048](https://github.com/GoogleContainerTools/kaniko/pull/1048)
* Document that this tool is not officially supported by Google [#1044](https://github.com/GoogleContainerTools/kaniko/pull/1044)
* Fix example pod.yml to not mount to root [#1043](https://github.com/GoogleContainerTools/kaniko/pull/1043)
* fixing docker run command in README.md [#1103](https://github.com/GoogleContainerTools/kaniko/pull/1103)

Huge thank you for this release towards our contributors:
- Anthony Davies
- Batuhan Apaydın
- Ben Einaudi
- Cole Wippern
- cvgw
- DracoBlue
- James Ravn
- Logan.Price
- Moritz Wanzenböck
- ohchang-kwon
- Or Sela
- Sam Stoelinga
- Tejal Desai
- Thomas Bonfort
- Thomas Strömberg
- tinkerborg
- Wietse Muizelaar
- xanonid
- Yoan Blanc

# v0.17.1 Release - 2020-02-04

This is minor patch release to fix [#1002](https://github.com/GoogleContainerTools/kaniko/issues/1002)

# v0.17.0 Release - 2020-02-03

## New Features
* Expand build argument from environment when no value specified [#993](https://github.com/GoogleContainerTools/kaniko/pull/993)
* whitelist /tmp/apt-key-gpghome.* directory [#1000](https://github.com/GoogleContainerTools/kaniko/pull/1000)
* Add flag to `--whitelist-var-run` set to true to preserver default kani… [#1011](https://github.com/GoogleContainerTools/kaniko/pull/1011)
* Prefer platform that is currently running for pulling remote images and kaniko binary Makefile target [#980](https://github.com/GoogleContainerTools/kaniko/pull/980)

## Bug Fixes
* Fix caching to respect .dockerignore [#854](https://github.com/GoogleContainerTools/kaniko/pull/854)
* Fixes #988 run_in_docker.sh only works with gcr.io [#990](https://github.com/GoogleContainerTools/kaniko/pull/990)
* Fix Symlinks not being copied across stages [#971](https://github.com/GoogleContainerTools/kaniko/pull/971)
* Fix home and group set for user command [#995](https://github.com/GoogleContainerTools/kaniko/pull/995)
* Fix COPY or ADD to symlink destination breaks image [#943](https://github.com/GoogleContainerTools/kaniko/pull/943)
* [Caching] Fix bug with deleted files and cached run and copy commands
* [Mutistage Build] Fix bug with capital letter in stage names [#983](https://github.com/GoogleContainerTools/kaniko/pull/983)
* Fix #940 set modtime when extracting [#981](https://github.com/GoogleContainerTools/kaniko/pull/981)
* Fix Ability for ADD to unTar a file [#792](https://github.com/GoogleContainerTools/kaniko/pull/792)

# Updates and Refactors
* fix test flake [#1016](https://github.com/GoogleContainerTools/kaniko/pull/1016)
* Upgrade go-containerregistry third-party library [#957](https://github.com/GoogleContainerTools/kaniko/pull/957)
* Remove debug tag being built for every push to master [#1004](https://github.com/GoogleContainerTools/kaniko/pull/1004)
* Run integration tests in Travis CI [#979](https://github.com/GoogleContainerTools/kaniko/pull/979)

Huge thank you for this release towards our contributors:
- Anthony Davies
- Ben Einaudi
- Cole Wippern
- cvgw
- Logan.Price
- Moritz Wanzenböck
- ohchang-kwon
- Sam Stoelinga
- Tejal Desai
- Thomas Bonfort
- Wietse Muizelaar

# v0.16.0 Release - 2020-01-17

Happy New Year 2020!

@@ -67,18 +67,45 @@ _These tests will not run correctly unless you have [checked out your fork into

### Integration tests

The integration tests live in [`integration`](./integration) and can be run with:
Currently the integration tests that live in [`integration`](./integration) can be run against your own gcloud space or a local registry.

In either case, you will need the following tools:

* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation)

#### GCloud

To run integration tests with your GCloud Storage, you will also need the following tools:

* [`gcloud`](https://cloud.google.com/sdk/install)
* [`gsutil`](https://cloud.google.com/storage/docs/gsutil_install)
* A bucket in [GCS](https://cloud.google.com/storage/) which you have write access to via
  the user currently logged into `gcloud`
* An image repo which you have write access to via the user currently logged into `gcloud`

Once this step done, you must override the project using environment variables:

* `GCS_BUCKET` - The name of your GCS bucket
* `IMAGE_REPO` - The path to your docker image repo

This can be done as follows:

```shell
export GCS_BUCKET="gs://<your bucket>"
export IMAGE_REPO="gcr.io/somerepo"
make integration-test
```

If you want to run `make integration-test`, you must override the project using environment variables:
Login for both user and application credentials
```shell
gcloud auth login
gcloud auth application-default login
```

* `GCS_BUCKET` - The name of your GCS bucket
* `IMAGE_REPO` - The path to your docker image repo
Then you can launch integration tests as follows:

```shell
make integration-test
```

You can also run tests with `go test`, for example to run tests individually:

@@ -86,16 +113,37 @@ You can also run tests with `go test`, for example to run tests individually:
go test ./integration -v --bucket $GCS_BUCKET --repo $IMAGE_REPO -run TestLayers/test_layer_Dockerfile_test_copy_bucket
```

Requirements:
These tests will be kicked off by [reviewers](#reviews) for submitted PRs by the kokoro task.

#### Local repository

To run integration tests locally against a local registry, install a local docker registry

```shell
docker run --rm -d -p 5000:5000 --name registry registry:2
```

Then export the `IMAGE_REPO` variable with the `localhost:5000` value

```shell
export IMAGE_REPO=localhost:5000
```

And run the integration tests

```shell
make integration-test
```

You can also run tests with `go test`, for example to run tests individually:

```shell
go test ./integration -v --repo localhost:5000 -run TestLayers/test_layer_Dockerfile_test_copy_bucket
```

These tests will be kicked off by [reviewers](#reviews) for submitted PRs by the travis task.

* [`gcloud`](https://cloud.google.com/sdk/install)
* [`gsutil`](https://cloud.google.com/storage/docs/gsutil_install)
* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation)
* A bucket in [GCS](https://cloud.google.com/storage/) which you have write access to via
  the user currently logged into `gcloud`
* An image repo which you have write access to via the user currently logged into `gcloud`

These tests will be kicked off by [reviewers](#reviews) for submitted PRs.

### Benchmarking

Makefile
@@ -14,14 +14,15 @@

# Bump these on release
VERSION_MAJOR ?= 0
VERSION_MINOR ?= 16
VERSION_MINOR ?= 19
VERSION_BUILD ?= 0

VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
VERSION_PACKAGE = $(REPOPATH/pkg/version)

SHELL := /bin/bash
GOOS ?= $(shell go env GOOS)
GOARCH = amd64
GOARCH ?= $(shell go env GOARCH)
ORG := github.com/GoogleContainerTools
PROJECT := kaniko
REGISTRY?=gcr.io/kaniko-project

@@ -38,7 +39,7 @@ GO_LDFLAGS += '
EXECUTOR_PACKAGE = $(REPOPATH)/cmd/executor
WARMER_PACKAGE = $(REPOPATH)/cmd/warmer
KANIKO_PROJECT = $(REPOPATH)/kaniko
BUILD_ARG ?=
BUILD_ARG ?=

# Force using Go Modules and always read the dependencies from
# the `vendor` folder.

@@ -52,22 +53,47 @@ out/executor: $(GO_FILES)
out/warmer: $(GO_FILES)
	GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -o $@ $(WARMER_PACKAGE)

.PHONY: travis-setup
travis-setup:
	@ ./scripts/travis-setup.sh

.PHONY: minikube-setup
minikube-setup:
	@ ./scripts/minikube-setup.sh

.PHONY: test
test: out/executor
	@ ./test.sh
	@ ./scripts/test.sh

.PHONY: integration-test
integration-test:
	@ ./integration-test.sh
	@ ./scripts/integration-test.sh

.PHONY: integration-test-run
integration-test-run:
	@ ./scripts/integration-test.sh -run "TestRun"

.PHONY: integration-test-layers
integration-test-layers:
	@ ./scripts/integration-test.sh -run "TestLayers"

.PHONY: integration-test-k8s
integration-test-k8s:
	@ ./scripts/integration-test.sh -run "TestK8s"

.PHONY: integration-test-misc
integration-test-misc:
	$(eval RUN_ARG=$(shell ./scripts/misc-integration-test.sh))
	@ ./scripts/integration-test.sh -run "$(RUN_ARG)"

.PHONY: images
images:
	docker build ${BUILD_ARG} -t $(REGISTRY)/executor:latest -f deploy/Dockerfile .
	docker build ${BUILD_ARG} -t $(REGISTRY)/executor:debug -f deploy/Dockerfile_debug .
	docker build ${BUILD_ARG} -t $(REGISTRY)/warmer:latest -f deploy/Dockerfile_warmer .
	docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/executor:latest -f deploy/Dockerfile .
	docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/executor:debug -f deploy/Dockerfile_debug .
	docker build ${BUILD_ARG} --build-arg=GOARCH=$(GOARCH) -t $(REGISTRY)/warmer:latest -f deploy/Dockerfile_warmer .

.PHONY: push
push:
	docker push $(REGISTRY)/executor:latest
	docker push $(REGISTRY)/executor:debug
	docker push $(REGISTRY)/warmer:latest
	docker push $(REGISTRY)/executor:latest
	docker push $(REGISTRY)/executor:debug
	docker push $(REGISTRY)/warmer:latest

README.md
@@ -4,19 +4,18 @@



kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.
kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.

kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace.
This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster.

kaniko is meant to be run as an image, `gcr.io/kaniko-project/executor`.
We do **not** recommend running the kaniko executor binary in another image, as it might not work.

kaniko is meant to be run as an image: `gcr.io/kaniko-project/executor`. We do **not** recommend running the kaniko executor binary in another image, as it might not work.

We'd love to hear from you! Join us on [#kaniko Kubernetes Slack](https://kubernetes.slack.com/messages/CQDCHGX7Y/)

:mega: **Please fill out our [quick 5-question survey](https://forms.gle/HhZGEM33x4FUz9Qa6)** so that we can learn how satisfied you are with Kaniko, and what improvements we should make. Thank you! :dancers:

Kaniko is not an officially supported Google project.

_If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md)._

@@ -59,6 +58,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME
- [--insecure](#--insecure)
- [--insecure-pull](#--insecure-pull)
- [--no-push](#--no-push)
- [--registry-mirror](#--registry-mirror)
- [--reproducible](#--reproducible)
- [--single-snapshot](#--single-snapshot)
- [--skip-tls-verify](#--skip-tls-verify)

@@ -67,6 +67,8 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME
- [--target](#--target)
- [--tarPath](#--tarpath)
- [--verbosity](#--verbosity)
- [--whitelist-var-run](#--whitelist-var-run)
- [--label](#--label)
- [Debug Image](#debug-image)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)

@@ -144,6 +146,7 @@ When running kaniko, use the `--context` flag with the appropriate prefix to spe
| Source | Prefix | Example |
|---------|---------|---------|
| Local Directory | dir://[path to a directory in the kaniko container] | `dir:///workspace` |
| Local Tar Gz | tar://[path to a .tar.gz in the kaniko container] | `tar://path/to/context.tar.gz` |
| GCS Bucket | gs://[bucket name]/[path to .tar.gz] | `gs://kaniko-bucket/path/to/context.tar.gz` |
| S3 Bucket | s3://[bucket name]/[path to .tar.gz] | `s3://kaniko-bucket/path/to/context.tar.gz` |
| Azure Blob Storage| https://[account].[azureblobhostsuffix]/[container]/[path to .tar.gz] | `https://myaccount.blob.core.windows.net/container/path/to/context.tar.gz` |

@@ -258,21 +261,24 @@ kaniko will build and push the final image in this build step.
Requirements:

- [Docker](https://docs.docker.com/install/)
- [gcloud](https://cloud.google.com/sdk/install)

We can run the kaniko executor image locally in a Docker daemon to build and push an image from a Dockerfile.

1. Load the executor image into the Docker daemon by running:
For example, when using gcloud and GCR you could run Kaniko as follows:
```shell
docker run \
    -v "$HOME"/.config/gcloud:/root/.config/gcloud \
    -v /path/to/context:/workspace \
    gcr.io/kaniko-project/executor:latest \
    --dockerfile /workspace/Dockerfile
    --destination "gcr.io/$PROJECT_ID/$IMAGE_NAME:$TAG"
    --context dir:///workspace/"
```

```shell
make images
```

2. Run kaniko in Docker using [`run_in_docker.sh`](./run_in_docker.sh):

```shell
./run_in_docker.sh <path to Dockerfile> <path to build context> <destination of final image>
```
There is also a utility script [`run_in_docker.sh`](./run_in_docker.sh) that can be used as follows:
```shell
./run_in_docker.sh <path to Dockerfile> <path to build context> <destination of final image>
```

_NOTE: `run_in_docker.sh` expects a path to a
Dockerfile relative to the absolute path of the build context._

@@ -282,7 +288,7 @@ context in the local directory `/home/user/kaniko-project`, and a Google Contain
as a remote image destination:

```shell
./run_in_docker.sh /workspace/Dockerfile /home/user/kaniko-project gcr.io//<project-id>/<tag>
./run_in_docker.sh /workspace/Dockerfile /home/user/kaniko-project gcr.io/$PROJECT_ID/$TAG
```

### Caching

@@ -339,7 +345,7 @@ Create a `config.json` file with your Docker registry url and the previous gener

Run kaniko with the `config.json` inside `/kaniko/.docker/config.json`

docker run -ti --rm -v `pwd`:/workspace -v config.json:/kaniko/.docker/config.json:ro gcr.io/kaniko-project/executor:latest --dockerfile=Dockerfile --destination=yourimagename
docker run -ti --rm -v `pwd`:/workspace -v `pwd`/config.json:/kaniko/.docker/config.json:ro gcr.io/kaniko-project/executor:latest --dockerfile=Dockerfile --destination=yourimagename

#### Pushing to Amazon ECR

@@ -449,7 +455,7 @@ For example, to surface the image digest built in a
this flag should be set to match the image resource `outputImageDir`.

_Note: Depending on the built image, the media type of the image manifest might be either
`application/vnd.oci.image.manifest.v1+json` or `application/vnd.docker.distribution.manifest.v2+json``._
`application/vnd.oci.image.manifest.v1+json` or `application/vnd.docker.distribution.manifest.v2+json`._

#### --insecure-registry

@@ -477,6 +483,10 @@ Set this flag if you want to pull images from a plain HTTP registry. It is suppo

Set this flag if you only want to build the image, without pushing to a registry.

#### --registry-mirror

Set this flag if you want to use a registry mirror instead of default `index.docker.io`.

#### --reproducible

Set this flag to strip timestamps out of the built image and make it reproducible.

@@ -512,6 +522,14 @@ You need to set `--destination` as well (for example `--destination=image`).

Set this flag as `--verbosity=<panic|fatal|error|warn|info|debug>` to set the logging level. Defaults to `info`.

#### --whitelist-var-run

Ignore /var/run when taking image snapshot. Set it to false to preserve /var/run/* in destination image. (Default true).

#### --label

Set this flag as `--label key=value` to set some metadata to the final image. This is equivalent as using the `LABEL` within the Dockerfile.

### Debug Image

The kaniko executor image is based on scratch and doesn't contain a shell.

@@ -28,6 +28,7 @@ import (
	"github.com/GoogleContainerTools/kaniko/pkg/config"
	"github.com/GoogleContainerTools/kaniko/pkg/constants"
	"github.com/GoogleContainerTools/kaniko/pkg/executor"
	"github.com/GoogleContainerTools/kaniko/pkg/logging"
	"github.com/GoogleContainerTools/kaniko/pkg/timing"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
	"github.com/genuinetools/amicontained/container"

@@ -38,14 +39,18 @@ import (
)

var (
	opts     = &config.KanikoOptions{}
	logLevel string
	force    bool
	opts      = &config.KanikoOptions{}
	force     bool
	logLevel  string
	logFormat string
)

func init() {
	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic")
	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (debug, info, warn, error, fatal, panic")
	RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)")

	RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")

	addKanikoOptionsFlags()
	addHiddenFlags(RootCmd)
}

@@ -55,9 +60,12 @@ var RootCmd = &cobra.Command{
	Use: "executor",
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		if cmd.Use == "executor" {
			if err := util.ConfigureLogging(logLevel); err != nil {
			resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv)

			if err := logging.Configure(logLevel, logFormat); err != nil {
				return err
			}

			if !opts.NoPush && len(opts.Destinations) == 0 {
				return errors.New("You must provide --destination, or use --no-push")
			}

@@ -73,6 +81,8 @@ var RootCmd = &cobra.Command{
			if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" {
				return errors.New("You must provide --destination if setting ImageNameDigestFile")
			}
			// Update whitelisted paths
			util.UpdateWhitelist(opts.WhitelistVarRun)
		}
		return nil
	},

@@ -144,6 +154,11 @@ func addKanikoOptionsFlags() {
	RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. Defaults to two weeks.")
	RootCmd.PersistentFlags().VarP(&opts.InsecureRegistries, "insecure-registry", "", "Insecure registry using plain HTTP to push and pull. Set it repeatedly for multiple registries.")
	RootCmd.PersistentFlags().VarP(&opts.SkipTLSVerifyRegistries, "skip-tls-verify-registry", "", "Insecure registry ignoring TLS verify to push and pull. Set it repeatedly for multiple registries.")
	opts.RegistriesCertificates = make(map[string]string)
	RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. Expected format is 'my.registry.url=/path/to/the/server/certificate'.")
	RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use has pull-through cache instead of docker.io.")
	RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
	RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. Set it repeatedly for multiple labels.")
}

// addHiddenFlags marks certain flags as hidden from the executor help text

@@ -197,17 +212,28 @@ func resolveDockerfilePath() error {
	return errors.New("please provide a valid path to a Dockerfile within the build context with --dockerfile")
}

// resolveEnvironmentBuildArgs replace build args without value by the same named environment variable
func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) string) {
	for index, argument := range arguments {
		i := strings.Index(argument, "=")
		if i < 0 {
			value := resolver(argument)
			arguments[index] = fmt.Sprintf("%s=%s", argument, value)
		}
	}
}
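
// Illustrative note (not part of the original source): given BuildArgs
// []string{"FOO", "BAR=baz"} and FOO=1 exported in the environment, the call
// resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv) rewrites the slice to
// []string{"FOO=1", "BAR=baz"}; when the variable is unset, the value resolves
// to an empty string, e.g. "FOO=".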

// copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore
// it won't be copied into the image
func copyDockerfile() error {
	if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, ""); err != nil {
	if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, "", util.DoNotChangeUID, util.DoNotChangeGID); err != nil {
		return errors.Wrap(err, "copying dockerfile")
	}
	opts.DockerfilePath = constants.DockerfilePath
	return nil
}

// resolveSourceContext unpacks the source context if it is a tar in a bucket
// resolveSourceContext unpacks the source context if it is a tar in a bucket or in kaniko container
// it resets srcContext to be the path to the unpacked build context within the image
func resolveSourceContext() error {
	if opts.SrcContext == "" && opts.Bucket == "" {

@@ -97,3 +97,55 @@ func TestIsUrl(t *testing.T) {
		})
	}
}

func TestResolveEnvironmentBuildArgs(t *testing.T) {
	tests := []struct {
		description               string
		input                     []string
		expected                  []string
		mockedEnvironmentResolver func(string) string
	}{
		{
			description: "replace when environment variable is present and value is not specified",
			input:       []string{"variable1"},
			expected:    []string{"variable1=value1"},
			mockedEnvironmentResolver: func(variable string) string {
				if variable == "variable1" {
					return "value1"
				}
				return ""
			},
		},
		{
			description: "do not replace when environment variable is present and value is specified",
			input:       []string{"variable1=value1", "variable2=value2"},
			expected:    []string{"variable1=value1", "variable2=value2"},
			mockedEnvironmentResolver: func(variable string) string {
				return "unexpected"
			},
		},
		{
			description: "do not replace when environment variable is present and empty value is specified",
			input:       []string{"variable1="},
			expected:    []string{"variable1="},
			mockedEnvironmentResolver: func(variable string) string {
				return "unexpected"
			},
		},
		{
			description: "replace with empty value when environment variable is not present or empty and value is not specified",
			input:       []string{"variable1", "variable2=value2"},
			expected:    []string{"variable1=", "variable2=value2"},
			mockedEnvironmentResolver: func(variable string) string {
				return ""
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			resolveEnvironmentBuildArgs(tt.input, tt.mockedEnvironmentResolver)
			testutil.CheckDeepEqual(t, tt.expected, tt.input)
		})
	}
}

@@ -23,19 +23,21 @@ import (

	"github.com/GoogleContainerTools/kaniko/pkg/cache"
	"github.com/GoogleContainerTools/kaniko/pkg/config"
	"github.com/GoogleContainerTools/kaniko/pkg/constants"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
	"github.com/GoogleContainerTools/kaniko/pkg/logging"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

var (
	opts     = &config.WarmerOptions{}
	logLevel string
	opts      = &config.WarmerOptions{}
	logLevel  string
	logFormat string
)

func init() {
	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic")
	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (debug, info, warn, error, fatal, panic")
	RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)")

	addKanikoOptionsFlags()
	addHiddenFlags()
}

@@ -43,9 +45,10 @@ func init() {
var RootCmd = &cobra.Command{
	Use: "cache warmer",
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		if err := util.ConfigureLogging(logLevel); err != nil {
		if err := logging.Configure(logLevel, logFormat); err != nil {
			return err
		}

		if len(opts.Images) == 0 {
			return errors.New("You must select at least one image to cache")
		}

@@ -14,12 +14,12 @@

# Builds the static Go image to execute in a Kubernetes job

FROM golang:1.12
FROM golang:1.14
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
RUN docker-credential-gcr configure-docker
# Get Amazon ECR credential helper
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64

@@ -28,15 +28,14 @@ ADD https://aadacr.blob.core.windows.net/acr-docker-credential-helper/docker-cre
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-acr-linux-amd64.tar.gz

COPY . .
RUN make
RUN make GOARCH=${GOARCH}

FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor
COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr-linux
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
COPY files/ca-certificates.crt /kaniko/ssl/certs/
COPY --from=0 /root/.docker/config.json /kaniko/.docker/config.json
ENV HOME /root
ENV USER /root
ENV PATH /usr/local/bin:/kaniko

@@ -15,17 +15,21 @@
# Builds the static Go image to execute in a Kubernetes job

# Stage 0: Build the executor binary and get credential helpers
FROM golang:1.12
FROM golang:1.14
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
RUN docker-credential-gcr configure-docker
# Get Amazon ECR credential helper
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
# ACR docker credential helper
ADD https://aadacr.blob.core.windows.net/acr-docker-credential-helper/docker-credential-acr-linux-amd64.tar.gz /usr/local/bin
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-acr-linux-amd64.tar.gz

COPY . .
RUN make && make out/warmer
RUN make GOARCH=${GOARCH} && make out/warmer

# Stage 1: Get the busybox shell
FROM gcr.io/cloud-builders/bazel:latest

@@ -38,11 +42,11 @@ FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/* /kaniko/
COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/
# Declare /busybox as a volume to get it automatically whitelisted
VOLUME /busybox
COPY files/ca-certificates.crt /kaniko/ssl/certs/
COPY --from=0 /root/.docker/config.json /kaniko/.docker/config.json
ENV HOME /root
ENV USER /root
ENV PATH /usr/local/bin:/kaniko:/busybox

@@ -14,25 +14,28 @@

# Builds the static Go image to execute in a Kubernetes job

FROM golang:1.12
FROM golang:1.14
ARG GOARCH=amd64
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
RUN docker-credential-gcr configure-docker
# Get Amazon ECR credential helper
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
# ACR docker credential helper
ADD https://aadacr.blob.core.windows.net/acr-docker-credential-helper/docker-credential-acr-linux-amd64.tar.gz /usr/local/bin
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-acr-linux-amd64.tar.gz

COPY . .
RUN make out/warmer
RUN make GOARCH=${GOARCH} out/warmer

FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/warmer /kaniko/warmer
COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
COPY files/ca-certificates.crt /kaniko/ssl/certs/
COPY --from=0 /root/.docker/config.json /kaniko/.docker/config.json
ENV HOME /root
ENV USER /root
ENV PATH /usr/local/bin:/kaniko

@@ -2,19 +2,21 @@ steps:
# First, build kaniko
- name: "gcr.io/cloud-builders/docker"
  args: ["build", "-f", "deploy/Dockerfile",
         "-t", "gcr.io/kaniko-project/executor:${COMMIT_SHA}", "."]
         "-t", "gcr.io/$PROJECT_ID/${_EXECUTOR_IMAGE_NAME}:${COMMIT_SHA}", "."]
# Then, we want to build kaniko:debug
- name: "gcr.io/cloud-builders/docker"
  args: ["build", "-f", "deploy/Dockerfile_debug",
         "-t", "gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}", "."]
         "-t", "gcr.io/$PROJECT_ID/${_EXECUTOR_IMAGE_NAME}:debug-${COMMIT_SHA}", "."]
- name: "gcr.io/cloud-builders/docker"
  args: ["build", "-f", "deploy/Dockerfile_debug",
         "-t", "gcr.io/kaniko-project/executor:debug", "."]
         "-t", "gcr.io/$PROJECT_ID/${_EXECUTOR_IMAGE_NAME}:debug", "."]
# Then, we want to build the cache warmer
- name: "gcr.io/cloud-builders/docker"
  args: ["build", "-f", "deploy/Dockerfile_warmer",
         "-t", "gcr.io/kaniko-project/warmer:${COMMIT_SHA}", "."]
images: ["gcr.io/kaniko-project/executor:${COMMIT_SHA}",
         "gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}",
         "gcr.io/kaniko-project/executor:debug",
         "gcr.io/kaniko-project/warmer:${COMMIT_SHA}"]
         "-t", "gcr.io/$PROJECT_ID/${_WARMER_IMAGE_NAME}:${COMMIT_SHA}", "."]
images: ["gcr.io/$PROJECT_ID/${_EXECUTOR_IMAGE_NAME}:${COMMIT_SHA}",
         "gcr.io/$PROJECT_ID/${_EXECUTOR_IMAGE_NAME}:debug-${COMMIT_SHA}",
         "gcr.io/$PROJECT_ID/${_WARMER_IMAGE_NAME}:${COMMIT_SHA}"]
substitutions:
  _EXECUTOR_IMAGE_NAME: executor
  _WARMER_IMAGE_NAME: warmer

@@ -0,0 +1,72 @@
# Title

* Author(s): \<your name\>
* Reviewers: \<reviewer name\>

  If you are already working with someone mention their name.
  If not, please leave this empty, some one from the core team with assign it to themselves.
* Date: \<date\>
* Status: [Reviewed/Cancelled/Under implementation/Complete]

Here is a brief explanation of the Statuses

1. Reviewed: The proposal PR has been accepted, merged and ready for
   implementation.
2. Under implementation: An accepted proposal is being implemented by actual work.
   Note: The design might change in this phase based on issues during
   implementation.
3. Cancelled: During or before implementation the proposal was cancelled.
   It could be due to:
   * other features added which made the current design proposal obsolete.
   * No longer a priority.
4. Complete: This feature/change is implemented.

## Background

In this section, please mention and describe the new feature, redesign
or refactor.

Please provide a brief explanation for the following questions:

1. Why is this required?
2. If this is a redesign, what are the drawbacks of the current implementation?
3. Is there any another workaround, and if so, what are its drawbacks?
4. Mention related issues, if there are any.

Here is an example snippet for an enhancement:

___
Currently, Kaniko includes `build-args` when calculating layer cache key even if they are not used
in the corresponding dockerfile command.

This causes a 100% cache miss rate even if the layer contents are same.
Change layer caching to include `build-args` in cache key computation only if they are used in command.
___

## Design

Please describe your solution. Please list any:

* new command line flags
* interface changes
* design assumptions

### Open Issues/Questions

Please list any open questions here in the following format:

**\<Question\>**

Resolution: Please list the resolution if resolved during the design process or
specify __Not Yet Resolved__

## Implementation plan
As a team, we've noticed that larger PRs can go unreviewed for long periods of
time. Small incremental changes get reviewed faster and are also easier for
reviewers.
___

## Integration test plan

Please describe what new test cases you are going to consider.
@@ -0,0 +1,149 @@
# Filesystem Resolution 01

* Author(s): cgwippern@google.com
* Reviewers: Tejal Desai
* Date: 2020-02-12
* Status: Reviewed

## Background

Kaniko builds Docker image layers as overlay filesystem layers; specifically it
creates a tar file which contains the entire content of a given layer in the
overlay filesystem. Each overlay layer corresponds to one image layer.

Overlay filesystems should only contain the objects changed in each layer;
meaning that if only one file changes between some layer A and some B, layer B
would only contain a single file (the one that changed).

To accomplish this, Kaniko walks the entire filesystem to discover every object.
Some of these objects may actually be a symlink to another object in the
filesystem; in these cases we must consider both the link and the target object.

Kaniko also maintains a set of whitelisted (aka ignored) filepaths. Any object
which matches one of these filepaths should be ignored by kaniko.

This results in a 3 dimensional search space

* changed relative to previous layer
* symlink
* whitelisted

Kaniko must also track which objects are referred to by multiple stages; this
functionality is out of scope for this proposal.

This search space is currently managed in an inconsistent and somewhat ad-hoc
way; code that manages the various search dimensions is spread out and
duplicated. There are also a number of edge cases which continue
to cause bugs.

The search space dimensions cannot be reduced or substituted.

Currently there are a number of bugs around symlinks incorrectly resolved,
whitelists not respected, and unchanged files added to layers.

## Design

During snapshotting, filepaths should be resolved using a consitent API which
takes into account both symlinks and whitelist.

* Callers of this API should not be concerned with the type of object at a given filepath (e.g. symlink or not).
* Callers of this API should not be concerned with whether a given path is whitelisted.
* This API should return a set of filepaths which can be checked for changes
  without further link resolution or whitelist checking.

The API should take a limited set of arguments
* A list of absolute filepaths to scan
* The whitelist

The API should return only two arguments
* A set of filepaths
* error or nil

The signature of the API should look similar to
```
ResolveFilePaths(inputPaths []string, whitelist []WhitelistEntry) (resolvedPaths []string, err error)
```

The API will iterate over the set of filepaths and for each item
* check whether it is whitelisted; if it is, skip it
* check whether it is a symlink
* if it is a symlink
  * resolve the link ancestor (nearest ancestor which is a symlink) and the
    target
  * add the link ancestor to the output
  * check whether the target is whitelisted and if
    not add the target to the output

All ancestors of each filepath will also be added to the list, but the previous
checks will not be applied to the ancestors. This maintains the current behavior
which we believe is needed to maintain correct permissions on the ancestor
directories.
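
For illustration only, here is a minimal Go sketch of the behavior described
above. It is not the kaniko implementation: `WhitelistEntry` and the inline
whitelist check are placeholders for the existing whitelist helpers, and the
link handling is simplified to resolving the link itself rather than the
nearest symlinked ancestor.

```
package snapshot

import (
	"os"
	"path/filepath"
	"strings"
)

// WhitelistEntry is a stand-in for the real whitelist entry type.
type WhitelistEntry struct {
	Path string
}

func isWhitelisted(path string, whitelist []WhitelistEntry) bool {
	for _, w := range whitelist {
		if path == w.Path || strings.HasPrefix(path, w.Path+string(os.PathSeparator)) {
			return true
		}
	}
	return false
}

// ResolveFilePaths skips whitelisted paths, keeps symlinks along with their
// non-whitelisted targets, and adds ancestor directories without further
// checks, as described in the proposal.
func ResolveFilePaths(inputPaths []string, whitelist []WhitelistEntry) ([]string, error) {
	set := map[string]struct{}{}
	for _, p := range inputPaths {
		if isWhitelisted(p, whitelist) {
			continue
		}
		fi, err := os.Lstat(p)
		if err != nil {
			return nil, err
		}
		if fi.Mode()&os.ModeSymlink != 0 {
			// Keep the link itself in the output.
			set[p] = struct{}{}
			// Keep the target too, unless the target is whitelisted.
			target, err := filepath.EvalSymlinks(p)
			if err != nil {
				return nil, err
			}
			if !isWhitelisted(target, whitelist) {
				set[target] = struct{}{}
			}
		} else {
			set[p] = struct{}{}
		}
		// Ancestors are added unfiltered to preserve directory permissions.
		for dir := filepath.Dir(p); dir != "/" && dir != "."; dir = filepath.Dir(dir) {
			set[dir] = struct{}{}
		}
	}
	resolvedPaths := make([]string, 0, len(set))
	for p := range set {
		resolvedPaths = append(resolvedPaths, p)
	}
	return resolvedPaths, nil
}
```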

### Open Issues/Questions

\<Ignore symlinks targeting whitelisted paths?\>

Given some link `/foo/link/bar` whose target is a whitelisted path such as
`/var/run`, should `/foo/link/bar` be added to the layer?

Resolution: Resolved

Yes, it should be added.

\<Adding ancestor directories\>

According to [this comment](https://github.com/GoogleContainerTools/kaniko/blob/1e9f525509d4e6a066a6e07ab9afbef69b3a3b2c/pkg/snapshot/snapshot.go#L193)
the ancestor directories (parent, grandparent, etc) must also be added to the
layer to preserve the permissions on those directories. This brings into
question whether any filtering needs to happen on these ancestors. IIUC the
current whitelist logic it is possible for `/some/dir` to be whitelisted but not
`/some/dir/containing-a-file.txt`. If filtering needs to be applied to these
ancestors does it make most sense to handle this within the proposed filtering
API?

Resolution: Resolved

Yes, this should be handled in the API

\<Should the API handle diff'ing files?\>

The proposal currently states that the list of files returned from the API
should be immediately added to the layer, but this would imply that diff'ing
existing files, finding newly created files, and handling deleted files would
have already been done. It may be advantageous to handle these outside of the
API in order to reduce scope and complexity. If these are handled outside of the
API how can we decouple and encapsulate these two functions?

Resolution: Resolved

The API will not handle file diffing or whiteouts.

## Implementation plan

* Write the new API
* Write tests for the new API
* Integrate the new API into existing code

## Integration test plan

Add integration tests to the existing suite which cover the known bugs

## Notes

Given some path `/usr/lib/foo` which is a link to `/etc/foo/`

And `/etc/foo` contains `/etc/foo/bar.txt`

Adding a link `/usr/lib/foo/bar.txt` => `/etc/foo/bar.txt` will break the image

In a linux shell this raises an error
```
$ ls /usr/lib/bar
=> /usr/lib/bar/foo.txt
$ ln -s /usr/lib/bar barlink
$ ln -s /usr/lib/bar/foo.txt barlink/foo.txt
=> ERROR
```

Given some path `/usr/foo/bar` which is a link to `/dev/null`, and `/dev` is
whitelisted `/dev/null` should not be added to the image.

@@ -11,7 +11,7 @@ spec:
            "--destination=<user-name>/<repo>"] # replace with your dockerhub account
    volumeMounts:
      - name: kaniko-secret
        mountPath: /root
        mountPath: /kaniko/.docker
      - name: dockerfile-storage
        mountPath: /workspace
  restartPolicy: Never

@@ -21,7 +21,7 @@ spec:
        secretName: regcred
        items:
          - key: .dockerconfigjson
            path: .docker/config.json
            path: config.json
    - name: dockerfile-storage
      persistentVolumeClaim:
        claimName: dockerfile-claim

go.mod
@@ -1,114 +1,62 @@
module github.com/GoogleContainerTools/kaniko

go 1.13
go 1.14

replace (
	github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.3+incompatible
	github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf => github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf
	github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c => github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c
	github.com/tonistiigi/fsutil v0.0.0-20190819224149-3d2716dd0a4d => github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1
)

require (
	cloud.google.com/go v0.25.0
	cloud.google.com/go v0.38.0
	github.com/Azure/azure-pipeline-go v0.2.2 // indirect
	github.com/Azure/azure-sdk-for-go v19.1.0+incompatible // indirect
	github.com/Azure/azure-storage-blob-go v0.8.0
	github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
	github.com/Azure/go-autorest v10.15.0+incompatible // indirect
	github.com/Microsoft/go-winio v0.4.9 // indirect
	github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
	github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
	github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
	github.com/aws/aws-sdk-go v1.25.19
	github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
	github.com/boltdb/bolt v1.3.1 // indirect
	github.com/containerd/containerd v1.1.2 // indirect
	github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 // indirect
	github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 // indirect
	github.com/coreos/etcd v3.3.9+incompatible // indirect
	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
	github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible // indirect
	github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
	github.com/aws/aws-sdk-go v1.27.1
	github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c
	github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
	github.com/docker/go-units v0.3.3 // indirect
	github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 // indirect
	github.com/emirpasic/gods v1.9.0 // indirect
	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
	github.com/fsnotify/fsnotify v1.4.7 // indirect
	github.com/genuinetools/amicontained v0.4.3
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/gliderlabs/ssh v0.2.2 // indirect
	github.com/gogo/protobuf v1.1.1 // indirect
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
	github.com/golang/protobuf v1.1.0 // indirect
	github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a // indirect
	github.com/google/go-cmp v0.2.0
	github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111
	github.com/golang/mock v1.3.1
	github.com/google/go-cmp v0.3.0
	github.com/google/go-containerregistry v0.0.0-20200313165449-955bf358a3d8
	github.com/google/go-github v17.0.0+incompatible
	github.com/google/go-querystring v1.0.0 // indirect
	github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
	github.com/google/martian v2.1.0+incompatible // indirect
	github.com/googleapis/gax-go v2.0.0+incompatible // indirect
	github.com/googleapis/gnostic v0.2.0 // indirect
	github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
	github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect
	github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 // indirect
	github.com/hashicorp/go-uuid v1.0.1 // indirect
	github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
	github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be // indirect
	github.com/karrick/godirwalk v1.7.7
	github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e // indirect
	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
	github.com/mattn/go-shellwords v1.0.3 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
	github.com/minio/highwayhash v1.0.0
	github.com/mitchellh/go-homedir v1.0.0 // indirect
	github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75
	github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
	github.com/opencontainers/image-spec v1.0.1 // indirect
	github.com/opencontainers/runc v1.0.0-rc5 // indirect
	github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c
	github.com/opencontainers/runtime-spec v1.0.1 // indirect
	github.com/opencontainers/selinux v1.0.0-rc1 // indirect
	github.com/opentracing/opentracing-go v1.0.2 // indirect
	github.com/otiai10/copy v1.0.2
	github.com/pborman/uuid v1.2.0 // indirect
	github.com/pelletier/go-buffruneio v0.2.0 // indirect
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
	github.com/pkg/errors v0.8.0
	github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd // indirect
	github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
	github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 // indirect
	github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
	github.com/pkg/errors v0.9.1
	github.com/sergi/go-diff v1.0.0 // indirect
	github.com/sirupsen/logrus v1.4.2
	github.com/spf13/afero v1.2.1
	github.com/spf13/cobra v0.0.3
	github.com/spf13/pflag v1.0.1
	github.com/spf13/afero v1.2.2
	github.com/spf13/cobra v0.0.5
	github.com/spf13/pflag v1.0.5
	github.com/src-d/gcfg v1.3.0 // indirect
	github.com/stretchr/testify v1.4.0 // indirect
	github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e // indirect
	github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a // indirect
	github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 // indirect
	github.com/vbatts/tar-split v0.10.2 // indirect
	github.com/xanzy/ssh-agent v0.2.0 // indirect
	go.opencensus.io v0.14.0 // indirect
	golang.org/x/net v0.0.0-20190311183353-d8887717615a
	golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc
	golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
	golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
	google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 // indirect
	google.golang.org/appengine v1.1.0 // indirect
	google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 // indirect
	google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
	gopkg.in/src-d/go-billy.v4 v4.2.0 // indirect
	gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 // indirect
	gopkg.in/src-d/go-git.v4 v4.6.0
	gopkg.in/warnings.v0 v0.1.2 // indirect
||||
gotest.tools v2.2.0+incompatible // indirect
|
||||
k8s.io/api v0.0.0-20180711052118-183f3326a935 // indirect
|
||||
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d // indirect
|
||||
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a // indirect
|
||||
k8s.io/kubernetes v1.11.1 // indirect
|
||||
)
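
The closing parenthesis above ends the updated `require` block of go.mod; the go.sum hunks that follow only record the checksums matching these pins. As a rough, illustrative sketch (not part of this commit), version bumps such as the `github.com/pkg/errors` update above would normally be applied with the standard Go module commands, which regenerate go.sum as a side effect:

```
go get github.com/pkg/errors@v0.9.1   # pin the module at the desired version
go mod tidy                           # reconcile go.mod and refresh go.sum
go mod verify                         # check cached modules against go.sum checksums
```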
565
go.sum
@ -1,64 +1,149 @@
cloud.google.com/go v0.25.0 h1:6vD6xZTc8Jo6To8gHxFDRVsMvWFDgY3rugNszcDalN8=
|
||||
cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
|
||||
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible h1:ysqLW+tqZjJWOTE74heH/pDRbr4vlN3yV+dqQYgpyxw=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v38.0.0+incompatible h1:3D2O4g8AwDwyWkM1HpMFVux/ccQJmGJHXsE004Wsu1Q=
|
||||
github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
|
||||
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v10.15.0+incompatible h1:GqDO/9r+7tmkU8HI/DNLVkeucncU8jCul1DLeTaA3GI=
|
||||
github.com/Azure/go-autorest v10.15.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
|
||||
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.1.0 h1:ISSNzGUh+ZSzizJWOWzs8bwpXIePbGLW4z/AmUFGH5A=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
|
||||
github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.5/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
|
||||
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc=
|
||||
github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.27.1 h1:MXnqY6SlWySaZAqNnXThOvjRFdiiOuKtC6i7baFdNdU=
|
||||
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/containerd/containerd v1.1.2 h1:gojrlFOL/4A5zP4BPgK3YrCaVIgAIHZfqGco/+mfJOs=
|
||||
github.com/containerd/containerd v1.1.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 h1:T5LkgEMACPq7+VPRnkh51WhyV+Q0waOGGtp37Y82org=
|
||||
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U=
|
||||
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/coreos/etcd v3.3.9+incompatible h1:iKSVPXGNGqroBx4+RmUXv8emeU7y+ucRZSzTYgzLZwM=
|
||||
github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
|
||||
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf h1:juYsQtzJOxIaX5TW/3IU7qlvYU/nALKULBOEdiydFd0=
|
||||
github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.2.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20181001140422-bd77b46c8352/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=
|
||||
github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-cni v0.0.0-20190813230227-49fbd9b210f3/go.mod h1:2wlRxCQdiBY+OcjNg5x8kI+5mEL1fGt25L4IzQHYJsM=
|
||||
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible h1:8hsJ081BmnqE+fkFjgasY9S8+otMhlWxHcKSYcKzrpk=
|
||||
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197 h1:7X3lPJrEEhoUt1UnISqyUB4phKf9aAKVMdFXD63DJO8=
|
||||
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/cli v0.0.0-20190321234815-f40f9c240ab0/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558=
|
||||
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.0.0-20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c h1:1Pev8v0EhB6Fbu9FHCLzZD74gJdJk+QVmlbezI6OToM=
|
||||
github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.3.1/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20190604151032-3c26b4e7495e/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 h1:ASmFyV8Sc6XrH1ng0TBPpYLspA8b7qRT84IbrQY1jSY=
|
||||
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
|
||||
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
|
|
@ -70,226 +155,526 @@ github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
|||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
|
||||
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
|
||||
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a h1:ZJu5NB1Bk5ms4vw0Xu4i+jD32SE9jQXyfnOvwhHqlT0=
|
||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111 h1:5F39eE4QsUnAd6iGzt1/zBs3dhX877U2hJyOJHFmQF0=
|
||||
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk=
|
||||
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-containerregistry v0.0.0-20200313165449-955bf358a3d8 h1:S7U1nPK3fi2xjZkMrQKcRayVtMmqMFJs9UtXQW3GPzM=
|
||||
github.com/google/go-containerregistry v0.0.0-20200313165449-955bf358a3d8/go.mod h1:pD1UFYs7MCAx+ZLShBdttcaOSbyc8F9Na/9IZLNwJeA=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
|
||||
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
||||
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.2 h1:DcFegQ7+ECdmkJMfVwWlC+89I4esJ7p8nkGt9ainGDk=
|
||||
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8=
|
||||
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8=
|
||||
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE=
|
||||
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
|
||||
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/ishidawataru/sctp v0.0.0-20180213033435-07191f837fed/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
|
||||
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
|
||||
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/karrick/godirwalk v1.7.7 h1:lLkPCA+C0u1pI4fLFseaupvh5/THlPJIqSPmnGGViKs=
|
||||
github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
|
||||
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
|
||||
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
|
||||
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
|
||||
github.com/minio/highwayhash v1.0.0 h1:iMSDhgUILCr0TNm8LWlSjF8N0ZIj2qbO8WHp6Q/J2BA=
|
||||
github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc=
|
||||
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
|
||||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75 h1:xlqeydzXCHEsRtsDyT0rvOBF7t3iTxNSG8gTQk2rfms=
|
||||
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75/go.mod h1:nnELdKPRkUAQR6pAB3mRU3+IlbqL3SSaAWqQL8k/K+4=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c h1:QsHXQZ/5EQsj9IIXVVmTWaeEM0VAhZIrEMJ7XKB1Qu0=
|
||||
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c/go.mod h1:UjmFBX/gZo+/+B1YEiH9Or4s7wBgwvOHhidh3rOtCXw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v1.0.0-rc5 h1:rYjdzMDXVly2Av0RLs3nf/iVkaWh2UrDhuTdTT2KggQ=
|
||||
github.com/opencontainers/runc v1.0.0-rc5/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc6/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc8.0.20190621203724-f4982d86f7fd h1:w9DJ/JL7fK4VjMoGo4e9gsq2xRhZThNI4PFuAwN8dJ0=
|
||||
github.com/opencontainers/runc v1.0.0-rc8.0.20190621203724-f4982d86f7fd/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.0.0-20180909173843-eba862dc2470/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
|
||||
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSdStKodlokEPs=
|
||||
github.com/opencontainers/selinux v1.0.0-rc1/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
|
||||
github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
|
||||
github.com/opentracing/opentracing-go v0.0.0-20171003133519-1361b9cd60be/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1 h1:A7kMXwDPBTfIVRv2l6XV3U6Su3SzLUzZjxnDDQVZDIY=
|
||||
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1/go.mod h1:pXzZSDlN+HPzSdyIBnKNN9ptD9Hx7iZMWIJPTwo4FPE=
|
||||
github.com/otiai10/copy v1.0.2 h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc=
|
||||
github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
github.com/otiai10/mint v1.3.0 h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc=
|
||||
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
|
||||
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
|
||||
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
|
||||
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
|
||||
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd h1:R18uzd3mmBHD1ZrNQl83EcOdkKZ5byMwbsKuTxbNeIo=
|
||||
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc=
|
||||
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
|
||||
github.com/sirupsen/logrus v1.0.3/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
|
||||
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
|
||||
github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e h1:QjF5rxNgRSLHJDwKUvfYP3qOx1vTDzUi/+oSC8FXnCI=
|
||||
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a h1:qd/ItJwqOuirA+l39OBtUOLokgzKyqKsHMTdzHpoWFk=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a/go.mod h1:eden9dLzAAuNQ0L7whFr6/Mzgz6btsvQpUnxOOI+CCE=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 h1:WRlNtJ2whFMKo95/e6uaNuAnn5TxLcMzczqMcfbIDxo=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1/go.mod h1:hP47OZfgT1aNVDJj28EnEKaKg6mjPEoS5Tb4BsWCTPs=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v1.2.1/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ=
|
||||
github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
|
||||
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4 h1:czKEIG2Q3YRTgs6x/8xhjVMJD5byPo6cZuostkbTM74=
|
||||
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
|
||||
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
|
||||
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
|
||||
go.opencensus.io v0.14.0 h1:1eTLxqxSIAylcKoxnNkdhvvBNZDA8JwkKNXxgyma0IA=
|
||||
go.opencensus.io v0.14.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc h1:3ElrZeO6IBP+M8kgu5YFwRo92Gqr+zBg3aooYQ6ziqU=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU=
|
||||
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 h1:wCy2/9bhO1JeP2zZUALrj7ZdZuZoR4mRV57kTxjqRpo=
|
||||
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262 h1:qsl9y/CJx34tuA7QCPNp86JNJe4spst6Ff8MjvPUdPg=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200210192313-1ace956b0e17 h1:a/Fd23DJvg1CaeDH0dYHahE+hCI0v9rFgxSNIThoUcM=
|
||||
golang.org/x/tools v0.0.0-20200210192313-1ace956b0e17/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861 h1:ppLucX0K/60T3t6LPZQzTOkt5PytkEbQLIaSteq+TpE=
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
|
||||
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 h1:2PjFmwzH/sxgW9CRJDlEiwMHO8rOk1eMDzVL14HC1e4=
|
||||
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 h1:vTe0EiECbrLSj5+6SXMi+eWwYW/bjmd26LE5lv52YVs=
|
||||
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/src-d/go-billy.v4 v4.2.0 h1:VGbrP1EsYxtvVPEiHui+4//imr4E5MGEFLx66bQtusg=
|
||||
gopkg.in/src-d/go-billy.v4 v4.2.0/go.mod h1:ZHSF0JP+7oD97194otDUCD7Ofbk63+xFcfWP5bT6h+Q=
|
||||
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
|
||||
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
|
||||
gopkg.in/src-d/go-git.v4 v4.6.0 h1:3XrA9Qxiwfj7Iusd7dVYUqxMjJYPsLuBdUeQbwnL/NQ=
|
||||
gopkg.in/src-d/go-git.v4 v4.6.0/go.mod h1:CzbUWqMn4pvmvndg3gnh5iZFmSsbhyhUWdI0IQ60AQo=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
k8s.io/api v0.0.0-20180711052118-183f3326a935 h1:K2hsi4kfw1BvCEKX5J1A51pQtXjico7lOEmSiIHQ3/E=
|
||||
k8s.io/api v0.0.0-20180711052118-183f3326a935/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4=
|
||||
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 h1:4DIWGqvAjLME47asVwjb14H+6bDRu+4h43Ssw6tMAHc=
|
||||
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/api v0.17.4 h1:HbwOhDapkguO8lTAE8OX3hdF2qp8GtpC9CW/MQATXXo=
|
||||
k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA=
|
||||
k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw=
|
||||
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
|
||||
k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
|
||||
k8s.io/client-go v0.17.4 h1:VVdVbpTY70jiNHS1eiFkUt7ZIJX3txd29nDxxXH4en8=
|
||||
k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc=
|
||||
k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U=
|
||||
k8s.io/code-generator v0.17.2 h1:pTwl3rLB1fUyxmvEzmVPMM0tBSdUehd7z+bDzpj4lPE=
|
||||
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/component-base v0.17.4 h1:H9cdWZyiGVJfWmWIcHd66IsNBWTk1iEgU7D4kJksEnw=
|
||||
k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
|
||||
k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kubernetes v1.11.1 h1:wHOPX+teuYaSlUWfL/b24jMH0n7HECbj4Xt8i7kSZIw=
|
||||
k8s.io/kubernetes v1.11.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/legacy-cloud-providers v0.17.4 h1:VvFqJGiYAr2gIdoNuqbeZLEdxIFeN4Yt6OLJS9l2oIE=
|
||||
k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
@ -20,7 +20,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

if ! [ -x "$(command -v golangci-lint)" ]; then
    echo "Installing GolangCI-Lint"
    ${DIR}/install_golint.sh -b $GOPATH/bin v1.21.0
    ${DIR}/install_golint.sh -b $GOPATH/bin v1.23.7
fi

golangci-lint run
@ -1,40 +0,0 @@
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex

GCS_BUCKET="${GCS_BUCKET:-gs://kaniko-test-bucket}"
IMAGE_REPO="${IMAGE_REPO:-gcr.io/kaniko-test}"

docker version

# Sets up a kokoro (Google internal integration testing tool) environment
if [ -f "$KOKORO_GFILE_DIR"/common.sh ]; then
    echo "Installing dependencies..."
    source "$KOKORO_GFILE_DIR/common.sh"
    mkdir -p /usr/local/go/src/github.com/GoogleContainerTools/
    cp -r github/kaniko /usr/local/go/src/github.com/GoogleContainerTools/
    pushd /usr/local/go/src/github.com/GoogleContainerTools/kaniko
    echo "Installing container-diff..."
    mv $KOKORO_GFILE_DIR/container-diff-linux-amd64 $KOKORO_GFILE_DIR/container-diff
    chmod +x $KOKORO_GFILE_DIR/container-diff
    export PATH=$PATH:$KOKORO_GFILE_DIR
    cp $KOKORO_ROOT/src/keystore/72508_gcr_application_creds $HOME/.config/gcloud/application_default_credentials.json
fi

echo "Running integration tests..."
make out/executor
make out/warmer
go test ./integration/... -v --bucket "${GCS_BUCKET}" --repo "${IMAGE_REPO}" --timeout 30m "$@"
@ -0,0 +1 @@
scripts/integration-test.sh
@ -0,0 +1,34 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package integration

import "strings"

type integrationTestConfig struct {
	gcsBucket          string
	imageRepo          string
	onbuildBaseImage   string
	hardlinkBaseImage  string
	serviceAccount     string
	dockerMajorVersion int
}

const gcrRepoPrefix string = "gcr.io/"

func (config *integrationTestConfig) isGcrRepository() bool {
	return strings.HasPrefix(config.imageRepo, gcrRepoPrefix)
}
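As an aside for readers of this diff: a minimal sketch of how the config type above might be consumed, assuming the same `integration` package. The helper name and credential path here are illustrative only, loosely modeled on the `addServiceAccountFlags` helper that appears further down in this diff; they are not part of the change.

```
// Illustrative helper (not in the diff): only wire up GCR credentials when
// the configured image repository is actually hosted on gcr.io.
func gcrFlagsIfNeeded(cfg *integrationTestConfig, flags []string) []string {
	if cfg.isGcrRepository() && cfg.serviceAccount != "" {
		// Assumption: the credentials are exposed to the container via an
		// environment variable, as the docker run flags below suggest.
		flags = append(flags, "-e", "GOOGLE_APPLICATION_CREDENTIALS=/secret/"+cfg.serviceAccount)
	}
	return flags
}
```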
@ -0,0 +1,23 @@
|
|||
# Copyright 2020 Google, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM atomist/sdm-base:0.4.1
|
||||
|
||||
COPY package.json package-lock.json ./
|
||||
RUN npm ci \
|
||||
&& npm cache clean --force
|
||||
|
||||
COPY . ./
|
||||
|
||||
USER atomist:atomist
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"name": "foo",
|
||||
"version": "2.0.0",
|
||||
"lockfileVersion": 1,
|
||||
"requires": true,
|
||||
"dependencies": {}
|
||||
}
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
{
|
||||
"name": "foo",
|
||||
"version": "2.0.0",
|
||||
"description": "Don't forget to sanitize your inputs",
|
||||
"author": "Little Bobby Tables",
|
||||
"private": false,
|
||||
"devDependencies": {},
|
||||
"scripts": {},
|
||||
"license": "MIT",
|
||||
"dependencies": {}
|
||||
}
|
||||
|
|
@ -0,0 +1,27 @@
# Copyright 2020 Google, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM busybox

RUN addgroup -g 1001 -S appgroup \
    && adduser -u 1005 -S al -G appgroup \
    && adduser -u 1004 -S bob -G appgroup

# Add local file with and without chown
ADD --chown=1005:appgroup a.txt /a.txt
ADD b.txt /b.txt

# Add remote file with and without chown
ADD --chown=bob:1001 https://raw.githubusercontent.com/GoogleContainerTools/kaniko/master/README.md /r1.txt
ADD https://raw.githubusercontent.com/GoogleContainerTools/kaniko/master/README.md /r2.txt
@ -0,0 +1,17 @@
|
|||
# Copyright 2020 Google, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine
|
||||
|
||||
ADD test.txt /tmp/
|
||||
|
|
@ -0,0 +1 @@
|
|||
meow
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
# Copyright 2020 Google, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM phusion/baseimage:0.11
|
||||
ADD test-file /etc/service/file
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
FROM marketplace.gcr.io/google/ubuntu1804@sha256:4649ae6b381090fba6db38137eb05e03f44bf43c40149f734241c9f96aa0e001
|
||||
FROM ubuntu:18.04
|
||||
ENV dir /tmp/dir/
|
||||
ONBUILD RUN echo "onbuild" > /tmp/onbuild
|
||||
ONBUILD RUN mkdir $dir
|
||||
|
|
|
|||
|
|
@ -0,0 +1,20 @@
|
|||
# Copyright 2018 Google, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Test to make sure the executor builds an image correctly
|
||||
# when no files are changed
|
||||
|
||||
FROM library/debian:latest
|
||||
RUN echo "hey"
|
||||
MAINTAINER kaniko
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
FROM alpine@sha256:5ce5f501c457015c4b91f91a15ac69157d9b06f1a75cf9107bf2b62e0843983a
|
||||
COPY foo foo
|
||||
COPY foo /foodir/
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
# First, try adding some regular files
|
||||
ADD context/foo foo
|
||||
ADD context/foo /foodir/
|
||||
|
|
|
|||
|
|
@ -0,0 +1,10 @@
|
|||
FROM debian:9.11
|
||||
|
||||
ARG SSH_PRIVATE_KEY
|
||||
ARG SSH_PUBLIC_KEY
|
||||
|
||||
RUN mkdir .ssh && \
|
||||
chmod 700 .ssh && \
|
||||
echo "$SSH_PRIVATE_KEY" > .ssh/id_rsa && \
|
||||
echo "$SSH_PUBLIC_KEY" > .ssh/id_rsa.pub && \
|
||||
chmod 600 .ssh/id_rsa .ssh/id_rsa.pub
|
||||
|
|
@ -16,7 +16,7 @@
|
|||
# If the image is built twice, /date should be the same in both images
|
||||
# if the cache is implemented correctly
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
RUN date > /date
|
||||
COPY context/foo /foo
|
||||
RUN echo hey
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
# /date should be the same regardless of when this image is built
|
||||
# if the cache is implemented correctly
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
WORKDIR /foo
|
||||
RUN apt-get update && apt-get install -y make
|
||||
COPY context/bar /context
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
FROM gcr.io/distroless/base@sha256:628939ac8bf3f49571d05c6c76b8688cb4a851af6c7088e599388259875bde20 AS first
|
||||
FROM debian:10.2 AS first
|
||||
CMD ["mycmd"]
|
||||
|
||||
FROM first
|
||||
ENTRYPOINT ["myentrypoint"] # This should clear out CMD in the config metadata
|
||||
|
|
|
|||
|
|
@ -0,0 +1,10 @@
|
|||
FROM busybox as t
|
||||
RUN mkdir temp
|
||||
RUN echo "hello" > temp/target
|
||||
RUN ln -s target temp/link
|
||||
## Relative link with paths
|
||||
RUN mkdir workdir && cd workdir && ln -s ../temp/target relative_link
|
||||
|
||||
FROM scratch
|
||||
COPY --from=t temp/ dest/
|
||||
COPY --from=t /workdir/relative_link /workdirAnother/
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
FROM alpine
|
||||
|
||||
RUN mkdir -p /some/dir/ && echo 'first' > /some/dir/first.txt
|
||||
|
||||
RUN rm /some/dir/first.txt
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
ENV hey hey
|
||||
ENV PATH /usr/local
|
||||
ENV testmultipleeq="this=is a=test string=with a=lot of=equals"
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
EXPOSE 80
|
||||
EXPOSE 81/udp
|
||||
ENV protocol tcp
|
||||
|
|
|
|||
|
|
@ -12,4 +12,4 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
|
|
|
|||
|
|
@ -0,0 +1,13 @@
|
|||
FROM alpine as base_stage
|
||||
|
||||
RUN echo base_stage
|
||||
|
||||
|
||||
FROM base_stage as BUG_stage
|
||||
|
||||
RUN echo BUG_stage
|
||||
|
||||
|
||||
FROM BUG_stage as final_stage
|
||||
|
||||
RUN echo final_stage
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
FROM registry.access.redhat.com/ubi7/ubi:7.7-214
|
||||
|
||||
# Install GCC, GCC-C++ and make libraries for build environment
|
||||
# Then clean caches
|
||||
RUN yum --disableplugin=subscription-manager update -y \
|
||||
&& yum --disableplugin=subscription-manager install -y \
|
||||
gcc-4.8.5-39.el7 \
|
||||
gcc-c++-4.8.5-39.el7 \
|
||||
make-3.82-24.el7 \
|
||||
&& yum --disableplugin=subscription-manager clean all
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
FROM alpine as base_stage
|
||||
RUN echo base_stage
|
||||
RUN touch meow.txt
|
||||
|
||||
FROM base_stage as BUG_stage
|
||||
RUN echo BUG_stage
|
||||
RUN touch purr.txt
|
||||
|
||||
|
||||
FROM BUG_stage as final_stage
|
||||
RUN echo final_stage
|
||||
RUN touch mew.txt
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
FROM alpine:3.8 AS foo
|
||||
RUN mkdir /foo
|
||||
WORKDIR /foo
|
||||
RUN mkdir some_dir
|
||||
RUN touch some_file
|
||||
RUN chmod 777 some_dir
|
||||
RUN chmod 666 some_file
|
||||
RUN ls -l
|
||||
|
||||
FROM alpine:3.8
|
||||
COPY --from=foo /foo /bar
|
||||
RUN ls -l /bar
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
FROM ubuntu:rolling as builder
|
||||
|
||||
RUN apt-get update && apt-get -y upgrade && apt-get -y install lib32stdc++6 wget
|
||||
|
|
@ -0,0 +1,5 @@
|
|||
FROM alpine
|
||||
|
||||
RUN mkdir -p /some/dir/ && echo 'first' > /some/dir/first.txt
|
||||
|
||||
RUN rm /some/dir/first.txt
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
LABEL foo=bar
|
||||
LABEL "baz"="bat"
|
||||
ENV label1 "mylabel"
|
||||
|
|
|
|||
|
|
@ -1,15 +1,16 @@
|
|||
ARG REGISTRY=gcr.io
|
||||
ARG REPO=google-appengine
|
||||
ARG REGISTRY=docker.io
|
||||
ARG IMAGE=debian
|
||||
ARG TAG=9.11
|
||||
ARG WORD=hello
|
||||
ARG W0RD2=hey
|
||||
|
||||
FROM ${REGISTRY}/${REPO}/debian9 as stage1
|
||||
FROM ${REGISTRY}/${IMAGE}:${TAG} as stage1
|
||||
|
||||
# Should evaluate WORD and create /tmp/hello
|
||||
ARG WORD
|
||||
RUN touch /${WORD}
|
||||
|
||||
FROM ${REGISTRY}/${REPO}/debian9
|
||||
FROM ${REGISTRY}/${IMAGE}:${TAG}
|
||||
|
||||
COPY --from=stage1 /hello /tmp
|
||||
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/distroless/base@sha256:628939ac8bf3f49571d05c6c76b8688cb4a851af6c7088e599388259875bde20
|
||||
FROM debian:10.2
|
||||
CMD ["command", "one"]
|
||||
CMD ["command", "two"]
|
||||
CMD echo "hello"
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:f0159d14385afcb58a9b2fa8955c0cb64bd3abc365e8589f8c2dd38150fbfdbe as base
|
||||
FROM debian:9.11 as base
|
||||
COPY . .
|
||||
|
||||
FROM scratch as second
|
||||
|
|
@ -20,4 +20,4 @@ FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5a
|
|||
FROM fourth
|
||||
ARG file
|
||||
COPY --from=second /foo ${file}
|
||||
COPY --from=gcr.io/google-appengine/debian9@sha256:00109fa40230a081f5ecffe0e814725042ff62a03e2d1eae0563f1f82eaeae9b /etc/os-release /new
|
||||
COPY --from=debian:9.11 /etc/os-release /new
|
||||
|
|
|
|||
|
|
@ -1,2 +1,2 @@
|
|||
FROM tenstartups/alpine@sha256:31dc8b12e0f73a1de899146c3663644b7668f8fd198cfe9b266886c9abfa944b
|
||||
FROM alpine@sha256:ab00606a42621fb68f2ed6ad3c88be54397f981a7b70a79db3d1172b11c4367d
|
||||
RUN pwd
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
RUN echo "hey" > /etc/foo
|
||||
RUN echo "baz" > /etc/baz
|
||||
RUN cp /etc/baz /etc/bar
|
||||
|
|
|
|||
|
|
@ -15,6 +15,6 @@
|
|||
# Test to make sure the executor builds an image correctly
|
||||
# when no files are changed
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
RUN echo "hey"
|
||||
MAINTAINER kaniko
|
||||
|
|
|
|||
|
|
@ -12,6 +12,6 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
USER testuser:testgroup
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,35 @@
|
|||
# Copyright 2020 Google, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM debian:9.11
|
||||
|
||||
# default values for the root user.
|
||||
RUN touch $HOME/hello
|
||||
|
||||
# Non-root users created by useradd: alice with the default home, bob with an explicit uid and home directory.
|
||||
|
||||
RUN groupadd testgroup && \
|
||||
useradd --create-home --gid testgroup alice && \
|
||||
useradd --create-home --uid 1111 --home-dir /home/john --gid testgroup bob
|
||||
|
||||
USER alice:testgroup
|
||||
RUN touch $HOME/hello
|
||||
|
||||
USER bob
|
||||
RUN touch $HOME/hello
|
||||
|
||||
USER root
|
||||
|
||||
USER 1111
|
||||
RUN touch $HOME/world
|
||||
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
RUN useradd testuser
|
||||
RUN groupadd testgroup
|
||||
USER testuser:testgroup
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
RUN mkdir /foo
|
||||
RUN echo "hello" > /foo/hey
|
||||
VOLUME /foo/bar /tmp /qux/quux
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
|
||||
FROM debian:9.11
|
||||
VOLUME /foo1
|
||||
RUN echo "hello" > /foo1/hello
|
||||
WORKDIR /foo1/bar
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
FROM gcr.io/google-appengine/debian9@sha256:6b3aa04751aa2ac3b0c7be4ee71148b66d693ad212ce6d3244bd2a2a147f314a
|
||||
FROM debian:9.11
|
||||
COPY context/foo foo
|
||||
WORKDIR /test
|
||||
# Test that this will be appended on to the previous command, to create /test/workdir
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ limitations under the License.
|
|||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
|
@ -46,10 +47,11 @@ const (
|
|||
|
||||
// Arguments to build Dockerfiles with, used for both docker and kaniko builds
|
||||
var argsMap = map[string][]string{
|
||||
"Dockerfile_test_run": {"file=/file"},
|
||||
"Dockerfile_test_workdir": {"workdir=/arg/workdir"},
|
||||
"Dockerfile_test_add": {"file=context/foo"},
|
||||
"Dockerfile_test_onbuild": {"file=/tmp/onbuild"},
|
||||
"Dockerfile_test_run": {"file=/file"},
|
||||
"Dockerfile_test_workdir": {"workdir=/arg/workdir"},
|
||||
"Dockerfile_test_add": {"file=context/foo"},
|
||||
"Dockerfile_test_arg_secret": {"SSH_PRIVATE_KEY", "SSH_PUBLIC_KEY=Pµbl1cK€Y"},
|
||||
"Dockerfile_test_onbuild": {"file=/tmp/onbuild"},
|
||||
"Dockerfile_test_scratch": {
|
||||
"image=scratch",
|
||||
"hello=hello-value",
|
||||
|
|
@ -59,6 +61,11 @@ var argsMap = map[string][]string{
|
|||
"Dockerfile_test_multistage": {"file=/foo2"},
|
||||
}
|
||||
|
||||
// Environment to build Dockerfiles with, used for both docker and kaniko builds
|
||||
var envsMap = map[string][]string{
|
||||
"Dockerfile_test_arg_secret": {"SSH_PRIVATE_KEY=ThEPriv4t3Key"},
|
||||
}
|
||||
|
||||
// Arguments to build Dockerfiles with when building with docker
|
||||
var additionalDockerFlagsMap = map[string][]string{
|
||||
"Dockerfile_test_target": {"--target=second"},
|
||||
|
|
@ -71,6 +78,36 @@ var additionalKanikoFlagsMap = map[string][]string{
|
|||
"Dockerfile_test_target": {"--target=second"},
|
||||
}
|
||||
|
||||
// Output checks to run against the build log when building with kaniko.
var outputChecks = map[string]func(string, []byte) error{
	"Dockerfile_test_arg_secret": checkArgsNotPrinted,
}

// checkArgsNotPrinted checks that build-argument values are not printed in the build output.
// An argument may be passed as --build-arg key=value, or as --build-arg key alone with the value taken from the environment.
|
||||
func checkArgsNotPrinted(dockerfile string, out []byte) error {
|
||||
for _, arg := range argsMap[dockerfile] {
|
||||
argSplitted := strings.Split(arg, "=")
|
||||
if len(argSplitted) == 2 {
|
||||
if idx := bytes.Index(out, []byte(argSplitted[1])); idx >= 0 {
|
||||
return fmt.Errorf("Argument value %s for argument %s displayed in output", argSplitted[1], argSplitted[0])
|
||||
}
|
||||
} else if len(argSplitted) == 1 {
|
||||
if envs, ok := envsMap[dockerfile]; ok {
|
||||
for _, env := range envs {
|
||||
envSplitted := strings.Split(env, "=")
|
||||
if len(envSplitted) == 2 {
|
||||
if idx := bytes.Index(out, []byte(envSplitted[1])); idx >= 0 {
|
||||
return fmt.Errorf("Argument value %s for argument %s displayed in output", envSplitted[1], argSplitted[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var bucketContextTests = []string{"Dockerfile_test_copy_bucket"}
|
||||
var reproducibleTests = []string{"Dockerfile_test_reproducible"}
|
||||
|
||||
|
|
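A hedged, illustrative fragment of what the check above guards against, reusing the Dockerfile_test_arg_secret values from argsMap. It assumes the surrounding integration package and is not part of the diff; in the real harness the check is wired up through the outputChecks map after the kaniko build runs.

```
// Illustrative only: a build log that echoes the --build-arg value
// "Pµbl1cK€Y" must be rejected by checkArgsNotPrinted.
out := []byte("Step 5/7 : RUN echo Pµbl1cK€Y > .ssh/id_rsa.pub")
if err := checkArgsNotPrinted("Dockerfile_test_arg_secret", out); err == nil {
	panic("expected the leaked build-arg value to be flagged")
}
```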
@ -115,18 +152,15 @@ func FindDockerFiles(dockerfilesPath string) ([]string, error) {
|
|||
// keeps track of which files have been built.
|
||||
type DockerFileBuilder struct {
|
||||
// Holds all available docker files and whether or not they've been built
|
||||
FilesBuilt map[string]bool
|
||||
filesBuilt map[string]struct{}
|
||||
DockerfilesToIgnore map[string]struct{}
|
||||
TestCacheDockerfiles map[string]struct{}
|
||||
}
|
||||
|
||||
// NewDockerFileBuilder will create a DockerFileBuilder initialized with dockerfiles, which
|
||||
// it will assume are all as yet unbuilt.
|
||||
func NewDockerFileBuilder(dockerfiles []string) *DockerFileBuilder {
|
||||
d := DockerFileBuilder{FilesBuilt: map[string]bool{}}
|
||||
for _, f := range dockerfiles {
|
||||
d.FilesBuilt[f] = false
|
||||
}
|
||||
func NewDockerFileBuilder() *DockerFileBuilder {
|
||||
d := DockerFileBuilder{filesBuilt: map[string]struct{}{}}
|
||||
d.DockerfilesToIgnore = map[string]struct{}{
|
||||
// TODO: remove test_user_run from this when https://github.com/GoogleContainerTools/container-diff/issues/237 is fixed
|
||||
"Dockerfile_test_user_run": {},
|
||||
|
|
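The struct change above replaces `FilesBuilt map[string]bool` with an unexported `filesBuilt map[string]struct{}`, the common Go idiom for a set where only membership matters and the values carry no payload. A small self-contained sketch of that idiom, with illustrative names that are not from the diff:

```
package main

import "fmt"

func main() {
	// A set of dockerfile names that have already been built.
	built := map[string]struct{}{}

	markBuilt := func(name string) { built[name] = struct{}{} }
	isBuilt := func(name string) bool { _, ok := built[name]; return ok }

	markBuilt("Dockerfile_test_add")
	fmt.Println(isBuilt("Dockerfile_test_add")) // true
	fmt.Println(isBuilt("Dockerfile_test_run")) // false
}
```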
@ -151,40 +185,69 @@ func addServiceAccountFlags(flags []string, serviceAccount string) []string {
|
|||
return flags
|
||||
}
|
||||
|
||||
// BuildImage will build dockerfile (located at dockerfilesPath) using both kaniko and docker.
|
||||
// The resulting image will be tagged with imageRepo. If the dockerfile will be built with
|
||||
// context (i.e. it is in `buildContextTests`) the context will be pulled from gcsBucket.
|
||||
func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, dockerfile string) error {
|
||||
gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo
|
||||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
|
||||
fmt.Printf("Building images for Dockerfile %s\n", dockerfile)
|
||||
func (d *DockerFileBuilder) BuildDockerImage(imageRepo, dockerfilesPath, dockerfile, contextDir string) error {
|
||||
fmt.Printf("Building image for Dockerfile %s\n", dockerfile)
|
||||
|
||||
var buildArgs []string
|
||||
buildArgFlag := "--build-arg"
|
||||
for _, arg := range argsMap[dockerfile] {
|
||||
buildArgs = append(buildArgs, buildArgFlag)
|
||||
buildArgs = append(buildArgs, arg)
|
||||
buildArgs = append(buildArgs, buildArgFlag, arg)
|
||||
}
|
||||
|
||||
// build docker image
|
||||
additionalFlags := append(buildArgs, additionalDockerFlagsMap[dockerfile]...)
|
||||
dockerImage := strings.ToLower(imageRepo + dockerPrefix + dockerfile)
|
||||
dockerCmd := exec.Command("docker",
|
||||
append([]string{"build",
|
||||
"-t", dockerImage,
|
||||
"-f", path.Join(dockerfilesPath, dockerfile),
|
||||
"."},
|
||||
additionalFlags...)...,
|
||||
)
|
||||
|
||||
timer := timing.Start(dockerfile + "_docker")
|
||||
dockerArgs := []string{
|
||||
"build",
|
||||
"-t", dockerImage,
|
||||
}
|
||||
|
||||
if dockerfilesPath != "" {
|
||||
dockerArgs = append(dockerArgs, "-f", path.Join(dockerfilesPath, dockerfile))
|
||||
}
|
||||
|
||||
dockerArgs = append(dockerArgs, contextDir)
|
||||
dockerArgs = append(dockerArgs, additionalFlags...)
|
||||
|
||||
dockerCmd := exec.Command("docker", dockerArgs...)
|
||||
if env, ok := envsMap[dockerfile]; ok {
|
||||
dockerCmd.Env = append(dockerCmd.Env, env...)
|
||||
}
|
||||
|
||||
out, err := RunCommandWithoutTest(dockerCmd)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
}
|
||||
fmt.Printf("Build image for Dockerfile %s as %s. docker build output: %s \n", dockerfile, dockerImage, out)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BuildImage will build dockerfile (located at dockerfilesPath) using both kaniko and docker.
|
||||
// The resulting image will be tagged with imageRepo. If the dockerfile will be built with
|
||||
// context (i.e. it is in `buildContextTests`) the context will be pulled from gcsBucket.
|
||||
func (d *DockerFileBuilder) BuildImage(config *integrationTestConfig, dockerfilesPath, dockerfile string) error {
|
||||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
|
||||
return d.BuildImageWithContext(config, dockerfilesPath, dockerfile, cwd)
|
||||
}
|
||||
|
||||
func (d *DockerFileBuilder) BuildImageWithContext(config *integrationTestConfig, dockerfilesPath, dockerfile, contextDir string) error {
|
||||
if _, present := d.filesBuilt[dockerfile]; present {
|
||||
return nil
|
||||
}
|
||||
gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo
|
||||
|
||||
var buildArgs []string
|
||||
buildArgFlag := "--build-arg"
|
||||
for _, arg := range argsMap[dockerfile] {
|
||||
buildArgs = append(buildArgs, buildArgFlag, arg)
|
||||
}
|
||||
|
||||
timer := timing.Start(dockerfile + "_docker")
|
||||
d.BuildDockerImage(imageRepo, dockerfilesPath, dockerfile, contextDir)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
|
||||
contextFlag := "-c"
|
||||
contextPath := buildContextPath
|
||||
|
|
@ -217,19 +280,31 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke
|
|||
}
|
||||
|
||||
// build kaniko image
|
||||
additionalFlags = append(buildArgs, additionalKanikoFlagsMap[dockerfile]...)
|
||||
additionalFlags := append(buildArgs, additionalKanikoFlagsMap[dockerfile]...)
|
||||
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
|
||||
fmt.Printf("Going to build image with kaniko: %s, flags: %s \n", kanikoImage, additionalFlags)
|
||||
dockerRunFlags := []string{
|
||||
"run", "-e", benchmarkEnv,
|
||||
"-v", cwd + ":/workspace",
|
||||
|
||||
dockerRunFlags := []string{"run", "--net=host",
|
||||
"-e", benchmarkEnv,
|
||||
"-v", contextDir + ":/workspace",
|
||||
"-v", benchmarkDir + ":/kaniko/benchmarks",
|
||||
}
|
||||
|
||||
if env, ok := envsMap[dockerfile]; ok {
|
||||
for _, envVariable := range env {
|
||||
dockerRunFlags = append(dockerRunFlags, "-e", envVariable)
|
||||
}
|
||||
}
|
||||
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount)
|
||||
|
||||
kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile)
|
||||
if dockerfilesPath == "" {
|
||||
kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile")
|
||||
}
|
||||
|
||||
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
|
||||
"-f", path.Join(buildContextPath, dockerfilesPath, dockerfile),
|
||||
"-f", kanikoDockerfilePath,
|
||||
"-d", kanikoImage, reproducibleFlag,
|
||||
contextFlag, contextPath)
|
||||
dockerRunFlags = append(dockerRunFlags, additionalFlags...)
|
||||
|
|
@ -237,13 +312,21 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke
|
|||
kanikoCmd := exec.Command("docker", dockerRunFlags...)
|
||||
|
||||
timer = timing.Start(dockerfile + "_kaniko")
|
||||
out, err = RunCommandWithoutTest(kanikoCmd)
|
||||
out, err := RunCommandWithoutTest(kanikoCmd)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
return fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
d.FilesBuilt[dockerfile] = true
|
||||
if outputCheck := outputChecks[dockerfile]; outputCheck != nil {
|
||||
if err := outputCheck(dockerfile, out); err != nil {
|
||||
return fmt.Errorf("Output check failed for image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
}
|
||||
|
||||
d.filesBuilt[dockerfile] = struct{}{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -251,7 +334,7 @@ func populateVolumeCache() error {
|
|||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
warmerCmd := exec.Command("docker",
|
||||
append([]string{"run",
|
||||
append([]string{"run", "--net=host",
|
||||
"-d",
|
||||
"-v", os.Getenv("HOME") + "/.config/gcloud:/root/.config/gcloud",
|
||||
"-v", cwd + ":/workspace",
|
||||
|
|
@ -269,7 +352,7 @@ func populateVolumeCache() error {
|
|||
}
|
||||
|
||||
// buildCachedImages builds the images for testing caching via kaniko where version is the nth time this image has been built
|
||||
func (d *DockerFileBuilder) buildCachedImages(config *gcpConfig, cacheRepo, dockerfilesPath string, version int) error {
|
||||
func (d *DockerFileBuilder) buildCachedImages(config *integrationTestConfig, cacheRepo, dockerfilesPath string, version int) error {
|
||||
imageRepo, serviceAccount := config.imageRepo, config.serviceAccount
|
||||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
|
|
@ -284,7 +367,7 @@ func (d *DockerFileBuilder) buildCachedImages(config *gcpConfig, cacheRepo, dock
|
|||
}
|
||||
kanikoImage := GetVersionedKanikoImage(imageRepo, dockerfile, version)
|
||||
|
||||
dockerRunFlags := []string{"run",
|
||||
dockerRunFlags := []string{"run", "--net=host",
|
||||
"-v", cwd + ":/workspace",
|
||||
"-e", benchmarkEnv}
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount)
|
||||
|
|
@ -308,28 +391,45 @@ func (d *DockerFileBuilder) buildCachedImages(config *gcpConfig, cacheRepo, dock
|
|||
}
|
||||
|
||||
// buildRelativePathsImage builds the images for testing passing relatives paths to Kaniko
|
||||
func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, serviceAccount string) error {
|
||||
func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, serviceAccount, buildContextPath string) error {
|
||||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
|
||||
buildContextPath := "./relative-subdirectory"
|
||||
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
|
||||
dockerImage := GetDockerImage(imageRepo, "test_relative_"+dockerfile)
|
||||
kanikoImage := GetKanikoImage(imageRepo, "test_relative_"+dockerfile)
|
||||
|
||||
dockerRunFlags := []string{"run", "-v", cwd + ":/workspace"}
|
||||
dockerCmd := exec.Command("docker",
|
||||
append([]string{"build",
|
||||
"-t", dockerImage,
|
||||
"-f", dockerfile,
|
||||
"./context"},
|
||||
)...,
|
||||
)
|
||||
|
||||
timer := timing.Start(dockerfile + "_docker")
|
||||
out, err := RunCommandWithoutTest(dockerCmd)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
dockerRunFlags := []string{"run", "--net=host", "-v", cwd + ":/workspace"}
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount)
|
||||
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
|
||||
"-f", dockerfile,
|
||||
"-d", kanikoImage,
|
||||
"--digest-file", "./digest",
|
||||
"-c", buildContextPath)
|
||||
|
||||
kanikoCmd := exec.Command("docker", dockerRunFlags...)
|
||||
|
||||
timer := timing.Start(dockerfile + "_kaniko_relative_paths")
|
||||
_, err := RunCommandWithoutTest(kanikoCmd)
|
||||
timer = timing.Start(dockerfile + "_kaniko_relative_paths")
|
||||
out, err = RunCommandWithoutTest(kanikoCmd)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to build relative path image %s with kaniko command \"%s\": %s", kanikoImage, kanikoCmd.Args, err)
|
||||
return fmt.Errorf(
|
||||
"Failed to build relative path image %s with kaniko command \"%s\": %s\n%s",
|
||||
kanikoImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -36,13 +36,16 @@ import (
|
|||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var config *gcpConfig
|
||||
var config *integrationTestConfig
|
||||
var imageBuilder *DockerFileBuilder
|
||||
var allDockerfiles []string
|
||||
|
||||
const (
|
||||
daemonPrefix = "daemon://"
|
||||
integrationPath = "integration"
|
||||
dockerfilesPath = "dockerfiles"
|
||||
emptyContainerDiff = `[
|
||||
{
|
||||
|
|
@ -80,35 +83,57 @@ func getDockerMajorVersion() int {
|
|||
}
|
||||
return ver
|
||||
}
|
||||
func launchTests(m *testing.M) (int, error) {
|
||||
|
||||
if config.isGcrRepository() {
|
||||
contextFile, err := CreateIntegrationTarball()
|
||||
if err != nil {
|
||||
return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context")
|
||||
}
|
||||
|
||||
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
|
||||
if err != nil {
|
||||
return 1, errors.Wrap(err, "Failed to upload build context")
|
||||
}
|
||||
|
||||
if err = os.Remove(contextFile); err != nil {
|
||||
return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile))
|
||||
}
|
||||
|
||||
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
|
||||
defer DeleteFromBucket(fileInBucket)
|
||||
}
|
||||
if err := buildRequiredImages(); err != nil {
|
||||
return 1, errors.Wrap(err, "Error while building images")
|
||||
}
|
||||
|
||||
imageBuilder = NewDockerFileBuilder()
|
||||
|
||||
return m.Run(), nil
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
var err error
|
||||
if !meetsRequirements() {
|
||||
fmt.Println("Missing required tools")
|
||||
os.Exit(1)
|
||||
}
|
||||
config = initGCPConfig()
|
||||
|
||||
contextFile, err := CreateIntegrationTarball()
|
||||
if err != nil {
|
||||
fmt.Println("Failed to create tarball of integration files for build context", err)
|
||||
if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil {
|
||||
fmt.Println("Coudn't create map of dockerfiles", err)
|
||||
os.Exit(1)
|
||||
} else {
|
||||
config = initIntegrationTestConfig()
|
||||
exitCode, err := launchTests(m)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to upload build context", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = os.Remove(contextFile)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to remove tarball at %s: %s\n", contextFile, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
|
||||
defer DeleteFromBucket(fileInBucket)
|
||||
}
|
||||
|
||||
func buildRequiredImages() error {
|
||||
setupCommands := []struct {
|
||||
name string
|
||||
command []string
|
||||
|
|
@ -123,7 +148,7 @@ func TestMain(m *testing.M) {
|
|||
},
|
||||
{
|
||||
name: "Building onbuild base image",
|
||||
command: []string{"docker", "build", "-t", config.onbuildBaseImage, "-f", "dockerfiles/Dockerfile_onbuild_base", "."},
|
||||
command: []string{"docker", "build", "-t", config.onbuildBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_onbuild_base", dockerfilesPath), "."},
|
||||
},
|
||||
{
|
||||
name: "Pushing onbuild base image",
|
||||
|
|
@ -131,7 +156,7 @@ func TestMain(m *testing.M) {
|
|||
},
|
||||
{
|
||||
name: "Building hardlink base image",
|
||||
command: []string{"docker", "build", "-t", config.hardlinkBaseImage, "-f", "dockerfiles/Dockerfile_hardlink_base", "."},
|
||||
command: []string{"docker", "build", "-t", config.hardlinkBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_hardlink_base", dockerfilesPath), "."},
|
||||
},
|
||||
{
|
||||
name: "Pushing hardlink base image",
|
||||
|
|
@ -143,24 +168,14 @@ func TestMain(m *testing.M) {
|
|||
fmt.Println(setupCmd.name)
|
||||
cmd := exec.Command(setupCmd.command[0], setupCmd.command[1:]...)
|
||||
if out, err := RunCommandWithoutTest(cmd); err != nil {
|
||||
fmt.Printf("%s failed: %s", setupCmd.name, err)
|
||||
fmt.Println(string(out))
|
||||
os.Exit(1)
|
||||
return errors.Wrap(err, fmt.Sprintf("%s failed: %s", setupCmd.name, string(out)))
|
||||
}
|
||||
}
|
||||
|
||||
dockerfiles, err := FindDockerFiles(dockerfilesPath)
|
||||
if err != nil {
|
||||
fmt.Printf("Coudn't create map of dockerfiles: %s", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
imageBuilder = NewDockerFileBuilder(dockerfiles)
|
||||
|
||||
os.Exit(m.Run())
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
for dockerfile := range imageBuilder.FilesBuilt {
|
||||
for _, dockerfile := range allDockerfiles {
|
||||
t.Run("test_"+dockerfile, func(t *testing.T) {
|
||||
dockerfile := dockerfile
|
||||
t.Parallel()
|
||||
|
|
@ -172,18 +187,11 @@ func TestRun(t *testing.T) {
|
|||
}
|
||||
|
||||
buildImage(t, dockerfile, imageBuilder)
|
||||
imageBuilder.FilesBuilt[dockerfile] = true
|
||||
|
||||
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
|
||||
|
||||
// container-diff
|
||||
daemonDockerImage := daemonPrefix + dockerImage
|
||||
containerdiffCmd := exec.Command("container-diff", "diff", "--no-cache",
|
||||
daemonDockerImage, kanikoImage,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", string(diff))
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
|
|
@ -197,9 +205,26 @@ func TestRun(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func getGitRepo() string {
|
||||
var branch, repoSlug string
|
||||
if _, ok := os.LookupEnv("TRAVIS"); ok {
|
||||
if os.Getenv("TRAVIS_PULL_REQUEST") != "false" {
|
||||
branch = os.Getenv("TRAVIS_PULL_REQUEST_BRANCH")
|
||||
repoSlug = os.Getenv("TRAVIS_PULL_REQUEST_SLUG")
|
||||
log.Printf("Travis CI Pull request source repo: %s branch: %s\n", repoSlug, branch)
|
||||
} else {
|
||||
branch = os.Getenv("TRAVIS_BRANCH")
|
||||
repoSlug = os.Getenv("TRAVIS_REPO_SLUG")
|
||||
log.Printf("Travis CI repo: %s branch: %s\n", repoSlug, branch)
|
||||
}
|
||||
return "github.com/" + repoSlug + "#refs/heads/" + branch
|
||||
}
|
||||
return "github.com/GoogleContainerTools/kaniko"
|
||||
}
|
||||
|
||||
func TestGitBuildcontext(t *testing.T) {
|
||||
repo := "github.com/GoogleContainerTools/kaniko"
|
||||
dockerfile := "integration/dockerfiles/Dockerfile_test_run_2"
|
||||
repo := getGitRepo()
|
||||
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_run_2", integrationPath, dockerfilesPath)
|
||||
|
||||
// Build with docker
|
||||
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
|
||||
|
|
@ -210,12 +235,12 @@ func TestGitBuildcontext(t *testing.T) {
|
|||
repo})...)
|
||||
out, err := RunCommandWithoutTest(dockerCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
// Build with kaniko
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
|
||||
dockerRunFlags := []string{"run"}
|
||||
dockerRunFlags := []string{"run", "--net=host"}
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
|
||||
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
|
||||
"-f", dockerfile,
|
||||
|
|
@ -226,27 +251,21 @@ func TestGitBuildcontext(t *testing.T) {
|
|||
|
||||
out, err = RunCommandWithoutTest(kanikoCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with kaniko command \"%s\": %v %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
// container-diff
|
||||
daemonDockerImage := daemonPrefix + dockerImage
|
||||
containerdiffCmd := exec.Command("container-diff", "diff", "--no-cache",
|
||||
daemonDockerImage, kanikoImage,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", string(diff))
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
}
|
||||
|
||||
func TestGitBuildContextWithBranch(t *testing.T) {
|
||||
repo := "github.com/GoogleContainerTools/kaniko#refs/tags/v0.10.0"
|
||||
dockerfile := "integration/dockerfiles/Dockerfile_test_run_2"
|
||||
func TestBuildViaRegistryMirror(t *testing.T) {
|
||||
repo := getGitRepo()
|
||||
dockerfile := "integration/dockerfiles/Dockerfile_registry_mirror"
|
||||
|
||||
// Build with docker
|
||||
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
|
||||
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_registry_mirror")
|
||||
dockerCmd := exec.Command("docker",
|
||||
append([]string{"build",
|
||||
"-t", dockerImage,
|
||||
|
|
@ -254,32 +273,70 @@ func TestGitBuildContextWithBranch(t *testing.T) {
|
|||
repo})...)
|
||||
out, err := RunCommandWithoutTest(dockerCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
// Build with kaniko
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
|
||||
dockerRunFlags := []string{"run"}
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_registry_mirror")
|
||||
dockerRunFlags := []string{"run", "--net=host"}
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
|
||||
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
|
||||
"-f", dockerfile,
|
||||
"-d", kanikoImage,
|
||||
"--registry-mirror", "us-mirror.gcr.io",
|
||||
"-c", fmt.Sprintf("git://%s", repo))
|
||||
|
||||
kanikoCmd := exec.Command("docker", dockerRunFlags...)
|
||||
|
||||
out, err = RunCommandWithoutTest(kanikoCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with kaniko command \"%s\": %v %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
// container-diff
|
||||
daemonDockerImage := daemonPrefix + dockerImage
|
||||
containerdiffCmd := exec.Command("container-diff", "diff", "--no-cache",
|
||||
daemonDockerImage, kanikoImage,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", string(diff))
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
}
|
||||
|
||||
func TestBuildWithLabels(t *testing.T) {
|
||||
repo := getGitRepo()
|
||||
dockerfile := "integration/dockerfiles/Dockerfile_test_label"
|
||||
|
||||
testLabel := "mylabel=myvalue"
|
||||
|
||||
// Build with docker
|
||||
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_label:mylabel")
|
||||
dockerCmd := exec.Command("docker",
|
||||
append([]string{"build",
|
||||
"-t", dockerImage,
|
||||
"-f", dockerfile,
|
||||
"--label", testLabel,
|
||||
repo})...)
|
||||
out, err := RunCommandWithoutTest(dockerCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
// Build with kaniko
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_label:mylabel")
|
||||
dockerRunFlags := []string{"run", "--net=host"}
|
||||
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
|
||||
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
|
||||
"-f", dockerfile,
|
||||
"-d", kanikoImage,
|
||||
"--label", testLabel,
|
||||
"-c", fmt.Sprintf("git://%s", repo),
|
||||
)
|
||||
|
||||
kanikoCmd := exec.Command("docker", dockerRunFlags...)
|
||||
|
||||
out, err = RunCommandWithoutTest(kanikoCmd)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
|
||||
}
|
||||
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
|
|
@ -290,7 +347,7 @@ func TestLayers(t *testing.T) {
|
|||
"Dockerfile_test_add": 12,
|
||||
"Dockerfile_test_scratch": 3,
|
||||
}
|
||||
for dockerfile := range imageBuilder.FilesBuilt {
|
||||
for _, dockerfile := range allDockerfiles {
|
||||
t.Run("test_layer_"+dockerfile, func(t *testing.T) {
|
||||
dockerfile := dockerfile
|
||||
|
||||
|
|
@ -300,7 +357,6 @@ func TestLayers(t *testing.T) {
|
|||
}
|
||||
|
||||
buildImage(t, dockerfile, imageBuilder)
|
||||
imageBuilder.FilesBuilt[dockerfile] = true
|
||||
|
||||
// Pull the kaniko image
|
||||
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
|
||||
|
|
@ -318,15 +374,10 @@ func TestLayers(t *testing.T) {
|
|||
}
|
||||
|
||||
func buildImage(t *testing.T, dockerfile string, imageBuilder *DockerFileBuilder) {
|
||||
if imageBuilder.FilesBuilt[dockerfile] {
|
||||
return
|
||||
}
|
||||
|
||||
if err := imageBuilder.BuildImage(config, dockerfilesPath, dockerfile); err != nil {
|
||||
t.Errorf("Error building image: %s", err)
|
||||
t.FailNow()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -351,13 +402,7 @@ func TestCache(t *testing.T) {
|
|||
kanikoVersion0 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 0)
|
||||
kanikoVersion1 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 1)
|
||||
|
||||
// container-diff
|
||||
containerdiffCmd := exec.Command("container-diff", "diff",
|
||||
kanikoVersion0, kanikoVersion1,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", diff)
|
||||
diff := containerDiff(t, kanikoVersion0, kanikoVersion1)
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, kanikoVersion0, kanikoVersion1, kanikoVersion0, kanikoVersion1)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
|
|
@ -371,22 +416,29 @@ func TestCache(t *testing.T) {
|
|||
|
||||
func TestRelativePaths(t *testing.T) {
|
||||
|
||||
dockerfile := "Dockerfile_test_copy"
|
||||
dockerfile := "Dockerfile_relative_copy"
|
||||
|
||||
t.Run("test_relative_"+dockerfile, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
imageBuilder.buildRelativePathsImage(config.imageRepo, dockerfile, config.serviceAccount)
|
||||
|
||||
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
|
||||
dockerfile = filepath.Join("./dockerfiles", dockerfile)
|
||||
|
||||
// container-diff
|
||||
daemonDockerImage := daemonPrefix + dockerImage
|
||||
containerdiffCmd := exec.Command("container-diff", "diff", "--no-cache",
|
||||
daemonDockerImage, kanikoImage,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", string(diff))
|
||||
contextPath := "./context"
|
||||
|
||||
err := imageBuilder.buildRelativePathsImage(
|
||||
config.imageRepo,
|
||||
dockerfile,
|
||||
config.serviceAccount,
|
||||
contextPath,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dockerImage := GetDockerImage(config.imageRepo, "test_relative_"+dockerfile)
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, "test_relative_"+dockerfile)
|
||||
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
|
|
@ -560,15 +612,6 @@ func logBenchmarks(benchmark string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
type gcpConfig struct {
|
||||
gcsBucket string
|
||||
imageRepo string
|
||||
onbuildBaseImage string
|
||||
hardlinkBaseImage string
|
||||
serviceAccount string
|
||||
dockerMajorVersion int
|
||||
}
|
||||
|
||||
type imageDetails struct {
|
||||
name string
|
||||
numLayers int
|
||||
|
|
@ -579,8 +622,8 @@ func (i imageDetails) String() string {
|
|||
return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers)
|
||||
}
|
||||
|
||||
func initGCPConfig() *gcpConfig {
|
||||
var c gcpConfig
|
||||
func initIntegrationTestConfig() *integrationTestConfig {
|
||||
var c integrationTestConfig
|
||||
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.")
|
||||
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
|
||||
flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.")
|
||||
|
|
@ -598,8 +641,12 @@ func initGCPConfig() *gcpConfig {
|
|||
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", absPath)
|
||||
}
|
||||
|
||||
if c.gcsBucket == "" || c.imageRepo == "" {
|
||||
log.Fatalf("You must provide a gcs bucket (\"%s\" was provided) and a docker repo (\"%s\" was provided)", c.gcsBucket, c.imageRepo)
|
||||
if c.imageRepo == "" {
|
||||
log.Fatal("You must provide a image repository")
|
||||
}
|
||||
|
||||
if c.isGcrRepository() && c.gcsBucket == "" {
|
||||
log.Fatalf("You must provide a gcs bucket when using a Google Container Registry (\"%s\" was provided)", c.imageRepo)
|
||||
}
|
||||
if !strings.HasSuffix(c.imageRepo, "/") {
|
||||
c.imageRepo = c.imageRepo + "/"
|
||||
|
|
@ -622,3 +669,16 @@ func meetsRequirements() bool {
|
|||
}
|
||||
return hasRequirements
|
||||
}
|
||||
|
||||
// containerDiff compares the container images image1 and image2.
|
||||
func containerDiff(t *testing.T, image1, image2 string, flags ...string) []byte {
|
||||
flags = append([]string{"diff"}, flags...)
|
||||
flags = append(flags, image1, image2,
|
||||
"-q", "--type=file", "--type=metadata", "--json")
|
||||
|
||||
containerdiffCmd := exec.Command("container-diff", flags...)
|
||||
diff := RunCommand(containerdiffCmd, t)
|
||||
t.Logf("diff = %s", string(diff))
|
||||
|
||||
return diff
|
||||
}
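Not part of the diff: this helper consolidates the repeated inline container-diff invocations; a typical call, as the tests above now use it:

// Compare the docker-built and kaniko-built images and fail on any file or
// metadata difference.
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)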
|
||||
@ -0,0 +1,69 @@
|
|||
/*
|
||||
Copyright 2018 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestWithContext(t *testing.T) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir := filepath.Join(cwd, "dockerfiles-with-context")
|
||||
|
||||
testDirs, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
builder := NewDockerFileBuilder()
|
||||
|
||||
for _, tdInfo := range testDirs {
|
||||
name := tdInfo.Name()
|
||||
testDir := filepath.Join(dir, name)
|
||||
|
||||
t.Run("test_with_context_"+name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if err := builder.BuildImageWithContext(
|
||||
config, "", name, testDir,
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dockerImage := GetDockerImage(config.imageRepo, name)
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, name)
|
||||
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
if err := logBenchmarks("benchmark"); err != nil {
|
||||
t.Logf("Failed to create benchmark file: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
apiVersion: batch/v1
kind: Job
metadata:
  name: kaniko-test-{{.Name}}
spec:
  template:
    spec:
      hostNetwork: true
      containers:
      - name: kaniko
        image: localhost:5000/executor:debug
        args: [ "--context=dir:///workspace",
                "--destination={{.KanikoImage}}"]
        volumeMounts:
          - name: context
            mountPath: /workspace
      restartPolicy: Never
      volumes:
        - name: context
          hostPath:
            path: {{.Context}}
  backoffLimit: 1
|
||||
|
|
@ -0,0 +1,106 @@
|
|||
/*
|
||||
Copyright 2018 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type K8sConfig struct {
|
||||
KanikoImage string
|
||||
Context string
|
||||
Name string
|
||||
}
|
||||
|
||||
func TestK8s(t *testing.T) {
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir := filepath.Join(cwd, "dockerfiles-with-context")
|
||||
|
||||
testDirs, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
builder := NewDockerFileBuilder()
|
||||
|
||||
for _, tdInfo := range testDirs {
|
||||
name := tdInfo.Name()
|
||||
testDir := filepath.Join(dir, name)
|
||||
|
||||
t.Run("test_k8s_with_context_"+name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
if err := builder.BuildDockerImage(
|
||||
config.imageRepo, "", name, testDir,
|
||||
); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dockerImage := GetDockerImage(config.imageRepo, name)
|
||||
kanikoImage := GetKanikoImage(config.imageRepo, name)
|
||||
|
||||
tmpfile, err := ioutil.TempFile("", "k8s-job-*.yaml")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.Remove(tmpfile.Name()) // clean up
|
||||
tmpl := template.Must(template.ParseFiles("k8s-job.yaml"))
|
||||
job := K8sConfig{KanikoImage: kanikoImage, Context: testDir, Name: name}
|
||||
if err := tmpl.Execute(tmpfile, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Testing K8s based Kaniko building of dockerfile %s and push to %s \n",
|
||||
testDir, kanikoImage)
|
||||
content, err := ioutil.ReadFile(tmpfile.Name())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("K8s template %s:\n%s\n", tmpfile.Name(), content)
|
||||
|
||||
kubeCmd := exec.Command("kubectl", "apply", "-f", tmpfile.Name())
|
||||
RunCommand(kubeCmd, t)
|
||||
|
||||
fmt.Printf("Waiting for K8s kaniko build job to finish: %s\n",
|
||||
"job/kaniko-test-"+job.Name)
|
||||
|
||||
kubeWaitCmd := exec.Command("kubectl", "wait", "--for=condition=complete", "--timeout=60s",
|
||||
"job/kaniko-test-"+job.Name)
|
||||
RunCommand(kubeWaitCmd, t)
|
||||
|
||||
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
|
||||
|
||||
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
|
||||
checkContainerDiffOutput(t, diff, expected)
|
||||
})
|
||||
}
|
||||
|
||||
if err := logBenchmarks("benchmark"); err != nil {
|
||||
t.Logf("Failed to create benchmark file: %v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -43,12 +43,12 @@ func (b *AzureBlob) UnpackTarFromBuildContext() (string, error) {
|
|||
return "", errors.New("AZURE_STORAGE_ACCESS_KEY environment variable is not set")
|
||||
}
|
||||
|
||||
// Get storage accoutname for Azure Blob Storage
|
||||
// Get storage accountName for Azure Blob Storage
|
||||
u, _ := url.Parse(b.context)
|
||||
parts := azblob.NewBlobURLParts(*u)
|
||||
accountName := strings.Split(parts.Host, ".")[0]
|
||||
|
||||
// Generate credentail with accountname and accountkey
|
||||
// Generate credential with accountName and accountKey
|
||||
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
|
||||
if err != nil {
|
||||
return parts.Host, err
|
||||
|
|
@ -62,7 +62,7 @@ func (b *AzureBlob) UnpackTarFromBuildContext() (string, error) {
|
|||
return tarPath, err
|
||||
}
|
||||
|
||||
// Downloading contextfile from Azure Blob Storage
|
||||
// Downloading context file from Azure Blob Storage
|
||||
p := azblob.NewPipeline(credential, azblob.PipelineOptions{})
|
||||
blobURL := azblob.NewBlobURL(*u, p)
|
||||
ctx := context.Background()
|
||||
@ -24,6 +24,10 @@ import (
|
|||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
TarBuildContextPrefix = "tar://"
|
||||
)
|
||||
|
||||
// BuildContext unifies calls to download and unpack the build context.
|
||||
type BuildContext interface {
|
||||
// Unpacks a build context and returns the directory where it resides
|
||||
|
|
@ -51,6 +55,8 @@ func GetBuildContext(srcContext string) (BuildContext, error) {
|
|||
return &AzureBlob{context: srcContext}, nil
|
||||
}
|
||||
return nil, errors.New("url provided for https context is not in a supported format, please use the https url for Azure Blob Storage")
|
||||
case TarBuildContextPrefix:
|
||||
return &Tar{context: context}, nil
|
||||
}
|
||||
return nil, errors.New("unknown build context prefix provided, please use one of the following: gs://, dir://, s3://, git://, https://")
|
||||
return nil, errors.New("unknown build context prefix provided, please use one of the following: gs://, dir://, tar://, s3://, git://, https://")
|
||||
}
|
||||
@ -0,0 +1,40 @@
|
|||
/*
|
||||
Copyright 2018 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package buildcontext
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Tar unifies calls to download and unpack the build context.
|
||||
type Tar struct {
|
||||
context string
|
||||
}
|
||||
|
||||
// UnpackTarFromBuildContext unpacks the compressed tar file
|
||||
func (t *Tar) UnpackTarFromBuildContext() (string, error) {
|
||||
directory := constants.BuildContextDir
|
||||
if err := os.MkdirAll(directory, 0750); err != nil {
|
||||
return "", errors.Wrap(err, "unpacking tar from build context")
|
||||
}
|
||||
|
||||
return directory, util.UnpackCompressedTar(t.context, directory)
|
||||
}
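Not part of the diff: a minimal usage sketch of the new tar:// context, assuming the GetBuildContext switch shown earlier; the tarball path is hypothetical.

package main

import (
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
)

func main() {
	// The tar:// prefix routes the path to the new Tar build context.
	bc, err := buildcontext.GetBuildContext("tar:///workspace/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	// The compressed tarball is unpacked into the build-context directory and
	// that directory is returned for the build to use.
	dir, err := bc.UnpackTarFromBuildContext()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("build context unpacked to %s", dir)
}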
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright 2018 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package buildcontext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
)
|
||||
|
||||
func TestBuildWithLocalTar(t *testing.T) {
|
||||
_, ex, _, _ := runtime.Caller(0)
|
||||
cwd := filepath.Dir(ex)
|
||||
|
||||
testDir := "test_dir"
|
||||
testDirLongPath := filepath.Join(cwd, testDir)
|
||||
dirUnpack := filepath.Join(testDirLongPath, "dir_where_to_unpack")
|
||||
|
||||
if err := os.MkdirAll(dirUnpack, 0750); err != nil {
|
||||
t.Errorf("Failed to create dir_where_to_extract: %v", err)
|
||||
}
|
||||
|
||||
validDockerfile := "Dockerfile_valid"
|
||||
invalidDockerfile := "Dockerfile_invalid"
|
||||
nonExistingDockerfile := "Dockerfile_non_existing"
|
||||
|
||||
files := map[string]string{
|
||||
validDockerfile: "FROM debian:9.11\nRUN echo \"valid\"",
|
||||
invalidDockerfile: "FROM debian:9.11\nRUN echo \"invalid\"",
|
||||
}
|
||||
|
||||
if err := testutil.SetupFiles(testDir, files); err != nil {
|
||||
t.Errorf("Failed to setup files %v on %s: %v", files, testDir, err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(testDir); err != nil {
|
||||
t.Fatalf("Failed to Chdir on %s: %v", testDir, err)
|
||||
}
|
||||
|
||||
validTarPath := fmt.Sprintf("%s.tar.gz", validDockerfile)
|
||||
invalidTarPath := fmt.Sprintf("%s.tar.gz", invalidDockerfile)
|
||||
nonExistingTarPath := fmt.Sprintf("%s.tar.gz", nonExistingDockerfile)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
// Create Tar Gz File with dockerfile inside
|
||||
go func(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
validTarFile, err := os.Create(validTarPath)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create %s: %v", validTarPath, err)
|
||||
}
|
||||
defer validTarFile.Close()
|
||||
|
||||
invalidTarFile, err := os.Create(invalidTarPath)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create %s: %v", invalidTarPath, err)
|
||||
}
|
||||
defer invalidTarFile.Close()
|
||||
|
||||
gw := gzip.NewWriter(validTarFile)
|
||||
defer gw.Close()
|
||||
|
||||
tw := util.NewTar(gw)
|
||||
defer tw.Close()
|
||||
|
||||
if err := tw.AddFileToTar(validDockerfile); err != nil {
|
||||
t.Errorf("Failed to add %s to %s: %v", validDockerfile, validTarPath, err)
|
||||
}
|
||||
}(&wg)
|
||||
|
||||
// Waiting for the Tar Gz file creation to be done before moving on
|
||||
wg.Wait()
|
||||
|
||||
tests := []struct {
|
||||
dockerfile string
|
||||
srcContext string
|
||||
unpackShouldErr bool
|
||||
srcShaShouldErr bool
|
||||
destShaShouldErr bool
|
||||
}{
|
||||
{
|
||||
dockerfile: validDockerfile,
|
||||
srcContext: filepath.Join(testDir, validTarPath),
|
||||
unpackShouldErr: false,
|
||||
srcShaShouldErr: false,
|
||||
destShaShouldErr: false,
|
||||
},
|
||||
{
|
||||
dockerfile: invalidDockerfile,
|
||||
srcContext: filepath.Join(testDir, invalidTarPath),
|
||||
unpackShouldErr: true,
|
||||
srcShaShouldErr: false,
|
||||
destShaShouldErr: true,
|
||||
},
|
||||
{
|
||||
dockerfile: nonExistingDockerfile,
|
||||
srcContext: filepath.Join(testDir, nonExistingTarPath),
|
||||
unpackShouldErr: true,
|
||||
srcShaShouldErr: true,
|
||||
destShaShouldErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.dockerfile, func(t *testing.T) {
|
||||
err := util.UnpackCompressedTar(filepath.Join(cwd, tt.srcContext), dirUnpack)
|
||||
testutil.CheckError(t, tt.unpackShouldErr, err)
|
||||
srcSHA, err := getSHAFromFilePath(tt.dockerfile)
|
||||
testutil.CheckError(t, tt.srcShaShouldErr, err)
|
||||
destSHA, err := getSHAFromFilePath(filepath.Join(dirUnpack, tt.dockerfile))
|
||||
testutil.CheckError(t, tt.destShaShouldErr, err)
|
||||
if err == nil {
|
||||
testutil.CheckDeepEqual(t, srcSHA, destSHA)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(testDirLongPath); err != nil {
|
||||
t.Errorf("Failed to remove %s: %v", testDirLongPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
func getSHAFromFilePath(f string) (string, error) {
|
||||
data, err := ioutil.ReadFile(f)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sha, err := util.SHA256(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return sha, nil
|
||||
}
|
||||
|
|
@ -21,6 +21,7 @@ import (
|
|||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
|
||||
|
||||
|
|
@ -46,6 +47,11 @@ type AddCommand struct {
|
|||
func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
|
||||
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
||||
|
||||
uid, gid, err := util.GetUserGroup(a.cmd.Chown, replacementEnvs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting user group from chown")
|
||||
}
|
||||
|
||||
srcs, dest, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -65,19 +71,19 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
|
|||
return err
|
||||
}
|
||||
logrus.Infof("Adding remote URL %s to %s", src, urlDest)
|
||||
if err := util.DownloadFileToDest(src, urlDest); err != nil {
|
||||
return err
|
||||
if err := util.DownloadFileToDest(src, urlDest, uid, gid); err != nil {
|
||||
return errors.Wrap(err, "downloading remote source file")
|
||||
}
|
||||
a.snapshotFiles = append(a.snapshotFiles, urlDest)
|
||||
} else if util.IsFileLocalTarArchive(fullPath) {
|
||||
tarDest, err := util.DestinationFilepath("", dest, config.WorkingDir)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "determining dest for tar")
|
||||
}
|
||||
logrus.Infof("Unpacking local tar archive %s to %s", src, tarDest)
|
||||
extractedFiles, err := util.UnpackLocalTarArchive(fullPath, tarDest)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "unpacking local tar")
|
||||
}
|
||||
logrus.Debugf("Added %v from local tar archive %s", extractedFiles, src)
|
||||
a.snapshotFiles = append(a.snapshotFiles, extractedFiles...)
|
||||
|
|
@ -93,12 +99,13 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
|
|||
copyCmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: append(unresolvedSrcs, dest),
|
||||
Chown: a.cmd.Chown,
|
||||
},
|
||||
buildcontext: a.buildcontext,
|
||||
}
|
||||
|
||||
if err := copyCmd.ExecuteCommand(config, buildArgs); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "executing copy command")
|
||||
}
|
||||
a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...)
|
||||
return nil
|
||||
@ -0,0 +1,37 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package commands
|
||||
|
||||
import v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
||||
type Cached interface {
|
||||
Layer() v1.Layer
|
||||
ReadSuccess() bool
|
||||
}
|
||||
|
||||
type caching struct {
|
||||
layer v1.Layer
|
||||
readSuccess bool
|
||||
}
|
||||
|
||||
func (c caching) Layer() v1.Layer {
|
||||
return c.layer
|
||||
}
|
||||
|
||||
func (c caching) ReadSuccess() bool {
|
||||
return c.readSuccess
|
||||
}
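Not part of the diff: a sketch of how the embeddable type is meant to be consumed; cmd is a hypothetical variable, and CachingCopyCommand below embeds caching in exactly this way.

// After a cached command runs, the builder can reuse the cached layer directly
// instead of re-snapshotting the filesystem.
if cached, ok := cmd.(Cached); ok && cached.ReadSuccess() {
	layer := cached.Layer()
	_ = layer // appended to the image being assembled
}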
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package commands
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_caching(t *testing.T) {
|
||||
c := caching{layer: fakeLayer{}, readSuccess: true}
|
||||
|
||||
actual := c.Layer().(fakeLayer)
|
||||
expected := fakeLayer{}
|
||||
actualLen, expectedLen := len(actual.TarContent), len(expected.TarContent)
|
||||
if actualLen != expectedLen {
|
||||
t.Errorf("expected layer tar content to be %v but was %v", expectedLen, actualLen)
|
||||
}
|
||||
|
||||
if !c.ReadSuccess() {
|
||||
t.Errorf("expected ReadSuccess to be %v but was %v", true, c.ReadSuccess())
|
||||
}
|
||||
}
|
||||
|
|
@ -20,18 +20,23 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
)
|
||||
|
||||
// for testing
|
||||
var (
|
||||
getUserGroup = util.GetUserGroup
|
||||
)
|
||||
|
||||
type CopyCommand struct {
|
||||
BaseCommand
|
||||
cmd *instructions.CopyCommand
|
||||
|
|
@ -46,50 +51,55 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
|
|||
}
|
||||
|
||||
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
||||
uid, gid, err := getUserGroup(c.cmd.Chown, replacementEnvs)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting user group from chown")
|
||||
}
|
||||
|
||||
srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "resolving src")
|
||||
}
|
||||
|
||||
// For each source, iterate through and copy it over
|
||||
for _, src := range srcs {
|
||||
fullPath := filepath.Join(c.buildcontext, src)
|
||||
|
||||
fi, err := os.Lstat(fullPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not copy source")
|
||||
}
|
||||
if fi.IsDir() && !strings.HasSuffix(fullPath, string(os.PathSeparator)) {
|
||||
fullPath += "/"
|
||||
}
|
||||
cwd := config.WorkingDir
|
||||
if cwd == "" {
|
||||
cwd = constants.RootDir
|
||||
}
|
||||
destPath, err := util.DestinationFilepath(src, dest, cwd)
|
||||
|
||||
destPath, err := util.DestinationFilepath(fullPath, dest, cwd)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "find destination path")
|
||||
}
|
||||
|
||||
// If the destination dir is a symlink we need to resolve the path and use
|
||||
// that instead of the symlink path
|
||||
destPath, err = resolveIfSymlink(destPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "resolving dest symlink")
|
||||
}
|
||||
|
||||
if fi.IsDir() {
|
||||
if !filepath.IsAbs(dest) {
|
||||
// we need to add '/' to the end to indicate the destination is a directory
|
||||
dest = filepath.Join(cwd, dest) + "/"
|
||||
}
|
||||
copiedFiles, err := util.CopyDir(fullPath, dest, c.buildcontext)
|
||||
copiedFiles, err := util.CopyDir(fullPath, destPath, c.buildcontext, uid, gid)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "copying dir")
|
||||
}
|
||||
c.snapshotFiles = append(c.snapshotFiles, copiedFiles...)
|
||||
} else if fi.Mode()&os.ModeSymlink != 0 {
|
||||
// If file is a symlink, we want to create the same relative symlink
|
||||
} else if util.IsSymlink(fi) {
|
||||
// If file is a symlink, we want to copy the target file to destPath
|
||||
exclude, err := util.CopySymlink(fullPath, destPath, c.buildcontext)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "copying symlink")
|
||||
}
|
||||
if exclude {
|
||||
continue
|
||||
|
|
@ -97,9 +107,9 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
|
|||
c.snapshotFiles = append(c.snapshotFiles, destPath)
|
||||
} else {
|
||||
// ... Else, we want to copy over a file
|
||||
exclude, err := util.CopyFile(fullPath, destPath, c.buildcontext)
|
||||
exclude, err := util.CopyFile(fullPath, destPath, c.buildcontext, uid, gid)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "copying file")
|
||||
}
|
||||
if exclude {
|
||||
continue
|
||||
|
|
@ -153,6 +163,7 @@ func (c *CopyCommand) From() string {
|
|||
|
||||
type CachingCopyCommand struct {
|
||||
BaseCommand
|
||||
caching
|
||||
img v1.Image
|
||||
extractedFiles []string
|
||||
cmd *instructions.CopyCommand
|
||||
|
|
@ -167,9 +178,22 @@ func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *docke
|
|||
if cr.img == nil {
|
||||
return errors.New(fmt.Sprintf("cached command image is nil %v", cr.String()))
|
||||
}
|
||||
cr.extractedFiles, err = util.GetFSFromImage(RootDir, cr.img, cr.extractFn)
|
||||
|
||||
logrus.Infof("extractedFiles: %s", cr.extractedFiles)
|
||||
layers, err := cr.img.Layers()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "retrieve image layers")
|
||||
}
|
||||
|
||||
if len(layers) != 1 {
|
||||
return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers)))
|
||||
}
|
||||
|
||||
cr.layer = layers[0]
|
||||
cr.readSuccess = true
|
||||
|
||||
cr.extractedFiles, err = util.GetFSFromLayers(RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout())
|
||||
|
||||
logrus.Debugf("extractedFiles: %s", cr.extractedFiles)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "extracting fs from image")
|
||||
}
|
||||
|
|
@ -182,7 +206,10 @@ func (cr *CachingCopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs
|
|||
}
|
||||
|
||||
func (cr *CachingCopyCommand) FilesToSnapshot() []string {
|
||||
return cr.extractedFiles
|
||||
f := cr.extractedFiles
|
||||
logrus.Debugf("files extracted by caching copy command %s", f)
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func (cr *CachingCopyCommand) String() string {
|
||||
|
|
@ -197,21 +224,42 @@ func (cr *CachingCopyCommand) From() string {
|
|||
}
|
||||
|
||||
func resolveIfSymlink(destPath string) (string, error) {
|
||||
baseDir := filepath.Dir(destPath)
|
||||
if info, err := os.Lstat(baseDir); err == nil {
|
||||
switch mode := info.Mode(); {
|
||||
case mode&os.ModeSymlink != 0:
|
||||
linkPath, err := os.Readlink(baseDir)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "error reading symlink")
|
||||
}
|
||||
absLinkPath := filepath.Join(filepath.Dir(baseDir), linkPath)
|
||||
newPath := filepath.Join(absLinkPath, filepath.Base(destPath))
|
||||
logrus.Tracef("Updating destination path from %v to %v due to symlink", destPath, newPath)
|
||||
return newPath, nil
|
||||
}
|
||||
if !filepath.IsAbs(destPath) {
|
||||
return "", errors.New("dest path must be abs")
|
||||
}
|
||||
return destPath, nil
|
||||
|
||||
var nonexistentPaths []string
|
||||
|
||||
newPath := destPath
|
||||
for newPath != "/" {
|
||||
_, err := os.Lstat(newPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
dir, file := filepath.Split(newPath)
|
||||
newPath = filepath.Clean(dir)
|
||||
nonexistentPaths = append(nonexistentPaths, file)
|
||||
continue
|
||||
} else {
|
||||
return "", errors.Wrap(err, "failed to lstat")
|
||||
}
|
||||
}
|
||||
|
||||
newPath, err = filepath.EvalSymlinks(newPath)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to eval symlinks")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
for i := len(nonexistentPaths) - 1; i >= 0; i-- {
|
||||
newPath = filepath.Join(newPath, nonexistentPaths[i])
|
||||
}
|
||||
|
||||
if destPath != newPath {
|
||||
logrus.Tracef("Updating destination path from %v to %v due to symlink", destPath, newPath)
|
||||
}
|
||||
|
||||
return filepath.Clean(newPath), nil
|
||||
}
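Not part of the diff: a behavioural sketch of the rewritten resolution with hypothetical paths. Components that do not exist yet are peeled off, the deepest existing ancestor is resolved, and the missing suffix is re-joined onto the result.

// Given: /tmp/demo/dir exists, /tmp/demo/link is a symlink to /tmp/demo/dir,
// and the "new" suffix does not exist yet.
got, err := resolveIfSymlink("/tmp/demo/link/new/file.txt")
// err == nil
// got == "/tmp/demo/dir/new/file.txt"
_, _ = got, err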
|
||||
|
||||
func copyCmdFilesUsedFromContext(
|
||||
@ -23,12 +23,14 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -62,6 +64,9 @@ func setupTestTemp() string {
|
|||
}
|
||||
cperr := filepath.Walk(srcPath,
|
||||
func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path != srcPath {
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -194,14 +199,28 @@ func Test_resolveIfSymlink(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
cases := []testCase{{destPath: thepath, expectedPath: thepath, err: nil}}
|
||||
|
||||
cases := []testCase{
|
||||
{destPath: thepath, expectedPath: thepath, err: nil},
|
||||
{destPath: "/", expectedPath: "/", err: nil},
|
||||
}
|
||||
baseDir = tmpDir
|
||||
symLink := filepath.Join(baseDir, "symlink")
|
||||
if err := os.Symlink(filepath.Base(thepath), symLink); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
cases = append(cases, testCase{filepath.Join(symLink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil})
|
||||
cases = append(cases,
|
||||
testCase{filepath.Join(symLink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil},
|
||||
testCase{filepath.Join(symLink, "inner", "foo.txt"), filepath.Join(thepath, "inner", "foo.txt"), nil},
|
||||
)
|
||||
|
||||
absSymlink := filepath.Join(tmpDir, "abs-symlink")
|
||||
if err := os.Symlink(thepath, absSymlink); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
cases = append(cases,
|
||||
testCase{filepath.Join(absSymlink, "foo.txt"), filepath.Join(thepath, "foo.txt"), nil},
|
||||
testCase{filepath.Join(absSymlink, "inner", "foo.txt"), filepath.Join(thepath, "inner", "foo.txt"), nil},
|
||||
)
|
||||
|
||||
for i, c := range cases {
|
||||
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
|
||||
|
|
@ -291,14 +310,14 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) {
|
|||
c := &CachingCopyCommand{
|
||||
img: fakeImage{},
|
||||
}
|
||||
tc := testCase{
|
||||
desctiption: "with image containing no layers",
|
||||
}
|
||||
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
tc.command = c
|
||||
return tc
|
||||
return testCase{
|
||||
desctiption: "with image containing no layers",
|
||||
expectErr: true,
|
||||
command: c,
|
||||
}
|
||||
}(),
|
||||
func() testCase {
|
||||
c := &CachingCopyCommand{
|
||||
|
|
@ -361,6 +380,546 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
if c.layer == nil && tc.expectLayer {
|
||||
t.Error("expected the command to have a layer set but instead was nil")
|
||||
} else if c.layer != nil && !tc.expectLayer {
|
||||
t.Error("expected the command to have no layer set but instead found a layer")
|
||||
}
|
||||
|
||||
if c.readSuccess != tc.expectLayer {
|
||||
t.Errorf("expected read success to be %v but was %v", tc.expectLayer, c.readSuccess)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyCommand_ExecuteCommand_Extended(t *testing.T) {
|
||||
setupDirs := func(t *testing.T) (string, string) {
|
||||
testDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dir := filepath.Join(testDir, "bar")
|
||||
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
file := filepath.Join(dir, "bam.txt")
|
||||
|
||||
if err := ioutil.WriteFile(file, []byte("meow"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
targetPath := filepath.Join(dir, "dam.txt")
|
||||
if err := ioutil.WriteFile(targetPath, []byte("woof"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Symlink("dam.txt", filepath.Join(dir, "sym.link")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return testDir, filepath.Base(dir)
|
||||
}
|
||||
|
||||
t.Run("copy dir to another dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
expected, err := ioutil.ReadDir(filepath.Join(testDir, srcDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{srcDir, "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err = cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with contents of srcDir
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, f := range actual {
|
||||
testutil.CheckDeepEqual(t, expected[i].Name(), f.Name())
|
||||
testutil.CheckDeepEqual(t, expected[i].Mode(), f.Mode())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("copy file to a dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{filepath.Join(srcDir, "bam.txt"), "dest/"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with file bam.txt
|
||||
files, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, 1, len(files))
|
||||
testutil.CheckDeepEqual(t, files[0].Name(), "bam.txt")
|
||||
})
|
||||
|
||||
t.Run("copy file to a filepath", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{filepath.Join(srcDir, "bam.txt"), "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if bam.txt is copied to dest file
|
||||
if _, err := os.Lstat(filepath.Join(testDir, "dest")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
})
|
||||
t.Run("copy file to a dir without trailing /", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
destDir := filepath.Join(testDir, "dest")
|
||||
if err := os.MkdirAll(destDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{filepath.Join(srcDir, "bam.txt"), "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with file bam.txt
|
||||
files, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, 1, len(files))
|
||||
testutil.CheckDeepEqual(t, files[0].Name(), "bam.txt")
|
||||
|
||||
})
|
||||
|
||||
t.Run("copy symlink file to a dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{filepath.Join(srcDir, "sym.link"), "dest/"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with link sym.link
|
||||
files, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// bam.txt and sym.link should be present
|
||||
testutil.CheckDeepEqual(t, 1, len(files))
|
||||
testutil.CheckDeepEqual(t, files[0].Name(), "sym.link")
|
||||
testutil.CheckDeepEqual(t, true, files[0].Mode()&os.ModeSymlink != 0)
|
||||
linkName, err := os.Readlink(filepath.Join(testDir, "dest", "sym.link"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, linkName, "dam.txt")
|
||||
})
|
||||
|
||||
t.Run("copy deadlink symlink file to a dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
doesNotExists := filepath.Join(testDir, "dead.txt")
|
||||
if err := ioutil.WriteFile(doesNotExists, []byte("remove me"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Symlink("../dead.txt", filepath.Join(testDir, srcDir, "dead.link")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Remove(doesNotExists); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{filepath.Join(srcDir, "dead.link"), "dest/"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with link dead.link
|
||||
files, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, 1, len(files))
|
||||
testutil.CheckDeepEqual(t, files[0].Name(), "dead.link")
|
||||
testutil.CheckDeepEqual(t, true, files[0].Mode()&os.ModeSymlink != 0)
|
||||
linkName, err := os.Readlink(filepath.Join(testDir, "dest", "dead.link"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, linkName, "../dead.txt")
|
||||
})
|
||||
|
||||
t.Run("copy src symlink dir to a dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
expected, err := ioutil.ReadDir(filepath.Join(testDir, srcDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
another := filepath.Join(testDir, "another")
|
||||
os.Symlink(filepath.Join(testDir, srcDir), another)
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{"another", "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err = cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with contents of srcDir
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, f := range actual {
|
||||
testutil.CheckDeepEqual(t, expected[i].Name(), f.Name())
|
||||
testutil.CheckDeepEqual(t, expected[i].Mode(), f.Mode())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("copy dir with a symlink to a file outside of current src dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
expected, err := ioutil.ReadDir(filepath.Join(testDir, srcDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
anotherSrc := filepath.Join(testDir, "anotherSrc")
|
||||
if err := os.MkdirAll(anotherSrc, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
targetPath := filepath.Join(anotherSrc, "target.txt")
|
||||
if err := ioutil.WriteFile(targetPath, []byte("woof"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.Symlink(targetPath, filepath.Join(testDir, srcDir, "zSym.link")); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{srcDir, "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err = cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists contents of srcDir and an extra zSym.link created
|
||||
// in this test
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, 4, len(actual))
|
||||
for i, f := range expected {
|
||||
testutil.CheckDeepEqual(t, f.Name(), actual[i].Name())
|
||||
testutil.CheckDeepEqual(t, f.Mode(), actual[i].Mode())
|
||||
}
|
||||
linkName, err := os.Readlink(filepath.Join(testDir, "dest", "zSym.link"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, linkName, targetPath)
|
||||
})
|
||||
|
||||
t.Run("copy src symlink dir to a dir", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
expected, err := ioutil.ReadDir(filepath.Join(testDir, srcDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
another := filepath.Join(testDir, "another")
|
||||
os.Symlink(filepath.Join(testDir, srcDir), another)
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{"another", "dest"},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err = cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "dest" dir exists with bam.txt and "dest" dir is a symlink
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, f := range actual {
|
||||
testutil.CheckDeepEqual(t, expected[i].Name(), f.Name())
|
||||
testutil.CheckDeepEqual(t, expected[i].Mode(), f.Mode())
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("copy src dir to a dest dir which is a symlink", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
expected, err := ioutil.ReadDir(filepath.Join(testDir, srcDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
dest := filepath.Join(testDir, "dest")
|
||||
if err := os.MkdirAll(dest, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
linkedDest := filepath.Join(testDir, "linkDest")
|
||||
if err := os.Symlink(dest, linkedDest); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{srcDir, linkedDest},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err = cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "linkdest" dir exists with contents of srcDir
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "linkDest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i, f := range expected {
|
||||
testutil.CheckDeepEqual(t, f.Name(), actual[i].Name())
|
||||
testutil.CheckDeepEqual(t, f.Mode(), actual[i].Mode())
|
||||
}
|
||||
// Check if linkDest -> dest
|
||||
linkName, err := os.Readlink(filepath.Join(testDir, "linkDest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, linkName, dest)
|
||||
})
|
||||
|
||||
t.Run("copy src file to a dest dir which is a symlink", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
dest := filepath.Join(testDir, "dest")
|
||||
if err := os.MkdirAll(dest, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
linkedDest := filepath.Join(testDir, "linkDest")
|
||||
if err := os.Symlink(dest, linkedDest); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{fmt.Sprintf("%s/bam.txt", srcDir), linkedDest},
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
// Check if "linkDest" link is same.
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir, "dest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, "bam.txt", actual[0].Name())
|
||||
c, err := ioutil.ReadFile(filepath.Join(testDir, "dest", "bam.txt"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, "meow", string(c))
|
||||
// Check if linkDest -> dest
|
||||
linkName, err := os.Readlink(filepath.Join(testDir, "linkDest"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, linkName, dest)
|
||||
})
|
||||
|
||||
t.Run("copy src file to a dest dir with chown", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
original := getUserGroup
|
||||
defer func() { getUserGroup = original }()
|
||||
|
||||
uid := os.Getuid()
|
||||
gid := os.Getgid()
|
||||
|
||||
getUserGroup = func(userStr string, _ []string) (int64, int64, error) {
|
||||
return int64(uid), int64(gid), nil
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{fmt.Sprintf("%s/bam.txt", srcDir), testDir},
|
||||
Chown: "alice:group",
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
testutil.CheckNoError(t, err)
|
||||
|
||||
actual, err := ioutil.ReadDir(filepath.Join(testDir))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testutil.CheckDeepEqual(t, "bam.txt", actual[0].Name())
|
||||
|
||||
if stat, ok := actual[0].Sys().(*syscall.Stat_t); ok {
|
||||
if int(stat.Uid) != uid {
|
||||
t.Errorf("uid don't match, got %d, expected %d", stat.Uid, uid)
|
||||
}
|
||||
if int(stat.Gid) != gid {
|
||||
t.Errorf("gid don't match, got %d, expected %d", stat.Gid, gid)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("copy src file to a dest dir with chown and random user", func(t *testing.T) {
|
||||
testDir, srcDir := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
original := getUserGroup
|
||||
defer func() { getUserGroup = original }()
|
||||
|
||||
getUserGroup = func(userStr string, _ []string) (int64, int64, error) {
|
||||
return 12345, 12345, nil
|
||||
}
|
||||
|
||||
cmd := CopyCommand{
|
||||
cmd: &instructions.CopyCommand{
|
||||
SourcesAndDest: []string{fmt.Sprintf("%s/bam.txt", srcDir), testDir},
|
||||
Chown: "missing:missing",
|
||||
},
|
||||
buildcontext: testDir,
|
||||
}
|
||||
|
||||
cfg := &v1.Config{
|
||||
Cmd: nil,
|
||||
Env: []string{},
|
||||
WorkingDir: testDir,
|
||||
}
|
||||
|
||||
err := cmd.ExecuteCommand(cfg, dockerfile.NewBuildArgs([]string{}))
|
||||
if !errors.Is(err, os.ErrPermission) {
|
||||
testutil.CheckNoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
"os"
"os/exec"
"os/user"
"strconv"
"strings"
"syscall"

@ -41,7 +40,8 @@ type RunCommand struct {

// for testing
var (
userLookup = user.Lookup
userLookup   = user.Lookup
userLookupID = user.LookupId
)

func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {

@ -68,40 +68,31 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
cmd.Env = addDefaultHOME(config.User, replacementEnvs)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

u := config.User
userAndGroup := strings.Split(u, ":")
userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false)
if err != nil {
return errors.Wrapf(err, "resolving user %s", userAndGroup[0])
}

// If specified, run the command as a specific user
if config.User != "" {
userAndGroup := strings.Split(config.User, ":")
userStr := userAndGroup[0]
var groupStr string
if len(userAndGroup) > 1 {
groupStr = userAndGroup[1]
}

uidStr, gidStr, err := util.GetUserFromUsername(userStr, groupStr)
if userStr != "" {
uid, gid, err := util.GetUIDAndGIDFromString(userStr, true)
if err != nil {
return err
}

// uid and gid need to be uint32
uid64, err := strconv.ParseUint(uidStr, 10, 32)
if err != nil {
return err
}
uid := uint32(uid64)
var gid uint32
if gidStr != "" {
gid64, err := strconv.ParseUint(gidStr, 10, 32)
if err != nil {
return err
}
gid = uint32(gid64)
}
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
}

env, err := addDefaultHOME(userStr, replacementEnvs)
if err != nil {
return errors.Wrap(err, "adding default HOME variable")
}

cmd.Env = env

if err := cmd.Start(); err != nil {
return errors.Wrap(err, "starting command")
}
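The hunk above replaces the old username/uid parsing (util.GetUserFromUsername plus strconv.ParseUint) with a single util.GetUIDAndGIDFromString call, and hands the resolved ids to the child process through SysProcAttr.Credential. A minimal, self-contained sketch of that mechanism using only os/exec and syscall; the uid/gid values and the helper name are placeholders, not kaniko API.

```go
// Illustrative sketch (not part of the diff): run a child process under a
// given uid/gid via SysProcAttr.Credential. Linux-only, and the caller needs
// enough privilege to switch users; 1000:1000 is a placeholder pair.
package main

import (
	"os"
	"os/exec"
	"syscall"
)

func runAs(uid, gid uint32, name string, args ...string) error {
	cmd := exec.Command(name, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid:    true, // own process group, as the RUN command does
		Credential: &syscall.Credential{Uid: uid, Gid: gid},
	}
	return cmd.Run()
}

func main() {
	if err := runAs(1000, 1000, "id"); err != nil {
		os.Exit(1)
	}
}
```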
@ -123,32 +114,31 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
}

// addDefaultHOME adds the default value for HOME if it isn't already set
func addDefaultHOME(u string, envs []string) []string {
func addDefaultHOME(u string, envs []string) ([]string, error) {
for _, env := range envs {
split := strings.SplitN(env, "=", 2)
if split[0] == constants.HOME {
return envs
return envs, nil
}
}

// If user isn't set, set default value of HOME
if u == "" || u == constants.RootUser {
return append(envs, fmt.Sprintf("%s=%s", constants.HOME, constants.DefaultHOMEValue))
return append(envs, fmt.Sprintf("%s=%s", constants.HOME, constants.DefaultHOMEValue)), nil
}

// If user is set to username, set value of HOME to /home/${user}
// Otherwise the user is set to uid and HOME is /
home := "/"
userObj, err := userLookup(u)
if err == nil {
if userObj.HomeDir != "" {
home = userObj.HomeDir
if err != nil {
if uo, e := userLookupID(u); e == nil {
userObj = uo
} else {
home = fmt.Sprintf("/home/%s", userObj.Username)
return nil, err
}
}

return append(envs, fmt.Sprintf("%s=%s", constants.HOME, home))
return append(envs, fmt.Sprintf("%s=%s", constants.HOME, userObj.HomeDir)), nil
}
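In the hunk above, addDefaultHOME stops guessing `/home/<user>` and instead surfaces lookup failures: it tries `userLookup` (user.Lookup), falls back to `userLookupID` (user.LookupId), and then takes HOME from the resolved passwd entry. A minimal, self-contained sketch of that fallback using only os/user; the helper name below is illustrative and not part of the diff.

```go
// Illustrative sketch (not from the diff): look a user up by name, fall back
// to a uid lookup, and return the HomeDir from the passwd entry instead of
// guessing "/home/<name>".
package main

import (
	"fmt"
	"os/user"
)

func resolveHome(u string) (string, error) {
	userObj, err := user.Lookup(u)
	if err != nil {
		uo, idErr := user.LookupId(u)
		if idErr != nil {
			return "", err
		}
		userObj = uo
	}
	return userObj.HomeDir, nil
}

func main() {
	// "0" exercises the uid fallback; "root" would succeed on the first lookup.
	home, err := resolveHome("0")
	fmt.Println(home, err)
}
```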

// String returns some information about the command for the image config

@ -184,6 +174,7 @@ func (r *RunCommand) ShouldCacheOutput() bool {

type CachingRunCommand struct {
BaseCommand
caching
img v1.Image
extractedFiles []string
cmd *instructions.RunCommand

@ -197,7 +188,25 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker
if cr.img == nil {
return errors.New(fmt.Sprintf("command image is nil %v", cr.String()))
}
cr.extractedFiles, err = util.GetFSFromImage(constants.RootDir, cr.img, cr.extractFn)

layers, err := cr.img.Layers()
if err != nil {
return errors.Wrap(err, "retrieving image layers")
}

if len(layers) != 1 {
return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers)))
}

cr.layer = layers[0]
cr.readSuccess = true

cr.extractedFiles, err = util.GetFSFromLayers(
constants.RootDir,
layers,
util.ExtractFunc(cr.extractFn),
util.IncludeWhiteout(),
)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}

@ -206,7 +215,10 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker
}

func (cr *CachingRunCommand) FilesToSnapshot() []string {
return cr.extractedFiles
f := cr.extractedFiles
logrus.Debugf("files extracted from caching run command %s", f)

return f
}

func (cr *CachingRunCommand) String() string {
|
|
|
|||
|
|
@ -18,6 +18,7 @@ package commands
|
|||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
|
@ -33,11 +34,13 @@ import (
|
|||
|
||||
func Test_addDefaultHOME(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
user string
|
||||
mockUser *user.User
|
||||
initial []string
|
||||
expected []string
|
||||
name string
|
||||
user string
|
||||
mockUser *user.User
|
||||
lookupError error
|
||||
mockUserID *user.User
|
||||
initial []string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
name: "HOME already set",
|
||||
|
|
@ -78,31 +81,19 @@ func Test_addDefaultHOME(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "HOME not set and user set",
|
||||
user: "www-add",
|
||||
mockUser: &user.User{
|
||||
Username: "www-add",
|
||||
name: "USER is set using the UID",
|
||||
user: "newuser",
|
||||
lookupError: errors.New("User not found"),
|
||||
mockUserID: &user.User{
|
||||
Username: "user",
|
||||
HomeDir: "/home/user",
|
||||
},
|
||||
initial: []string{
|
||||
"PATH=/something/else",
|
||||
},
|
||||
expected: []string{
|
||||
"PATH=/something/else",
|
||||
"HOME=/home/www-add",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "HOME not set and user is set",
|
||||
user: "newuser",
|
||||
mockUser: &user.User{
|
||||
Username: "newuser",
|
||||
},
|
||||
initial: []string{
|
||||
"PATH=/something/else",
|
||||
},
|
||||
expected: []string{
|
||||
"PATH=/something/else",
|
||||
"HOME=/home/newuser",
|
||||
"HOME=/home/user",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
|
@ -122,10 +113,14 @@ func Test_addDefaultHOME(t *testing.T) {
|
|||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
userLookup = func(username string) (*user.User, error) { return test.mockUser, nil }
|
||||
defer func() { userLookup = user.Lookup }()
|
||||
actual := addDefaultHOME(test.user, test.initial)
|
||||
testutil.CheckErrorAndDeepEqual(t, false, nil, test.expected, actual)
|
||||
userLookup = func(username string) (*user.User, error) { return test.mockUser, test.lookupError }
|
||||
userLookupID = func(username string) (*user.User, error) { return test.mockUserID, nil }
|
||||
defer func() {
|
||||
userLookup = user.Lookup
|
||||
userLookupID = user.LookupId
|
||||
}()
|
||||
actual, err := addDefaultHOME(test.user, test.initial)
|
||||
testutil.CheckErrorAndDeepEqual(t, false, err, test.expected, actual)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
@ -238,14 +233,16 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) {
|
|||
c := &CachingRunCommand{
|
||||
img: fakeImage{},
|
||||
}
|
||||
tc := testCase{
|
||||
desctiption: "with image containing no layers",
|
||||
}
|
||||
|
||||
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
tc.command = c
|
||||
return tc
|
||||
|
||||
return testCase{
|
||||
desctiption: "with image containing no layers",
|
||||
expectErr: true,
|
||||
command: c,
|
||||
}
|
||||
}(),
|
||||
func() testCase {
|
||||
c := &CachingRunCommand{
|
||||
|
|
@ -310,6 +307,16 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) {
|
|||
t.Errorf("expected files used from context to be empty but was not")
|
||||
}
|
||||
}
|
||||
|
||||
if c.layer == nil && tc.expectLayer {
|
||||
t.Error("expected the command to have a layer set but instead was nil")
|
||||
} else if c.layer != nil && !tc.expectLayer {
|
||||
t.Error("expected the command to have no layer set but instead found a layer")
|
||||
}
|
||||
|
||||
if c.readSuccess != tc.expectLayer {
|
||||
t.Errorf("expected read success to be %v but was %v", tc.expectLayer, c.readSuccess)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,15 +17,22 @@ limitations under the License.
package commands

import (
"fmt"
"strings"

"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

// for testing
var (
Lookup = util.Lookup
)

type UserCommand struct {
BaseCommand
cmd *instructions.UserCommand

@ -38,19 +45,17 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false)
if err != nil {
return err
}
var groupStr string
if len(userAndGroup) > 1 {
groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false)
if err != nil {
return err
}
return errors.Wrap(err, fmt.Sprintf("resolving user %s", userAndGroup[0]))
}

if groupStr != "" {
if len(userAndGroup) > 1 {
groupStr, err := util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("resolving group %s", userAndGroup[1]))
}
userStr = userStr + ":" + groupStr
}

config.User = userStr
return nil
}
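With this change the USER command resolves environment references in both the user and the optional group part, then stores the combined `user:group` string back into config.User, wrapping failures with context. A small stand-alone sketch of that resolution; os.Expand stands in for util.ResolveEnvironmentReplacement and is only an illustrative substitute.

```go
// Illustrative sketch (not from the diff): resolve a USER value of the form
// "name:group" where either part may reference an environment variable.
package main

import (
	"fmt"
	"os"
	"strings"
)

func resolveUser(value string, env map[string]string) string {
	expand := func(s string) string {
		return os.Expand(s, func(k string) string { return env[k] })
	}
	parts := strings.SplitN(value, ":", 2)
	user := expand(parts[0])
	if len(parts) > 1 {
		user = user + ":" + expand(parts[1])
	}
	return user
}

func main() {
	env := map[string]string{"envuser": "root", "envgroup": "grp"}
	fmt.Println(resolveUser("$envuser:$envgroup", env)) // root:grp
}
```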
|
|||
|
|
@ -16,9 +16,12 @@ limitations under the License.
|
|||
package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/user"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
|
@ -27,47 +30,69 @@ import (
|
|||
|
||||
var userTests = []struct {
|
||||
user string
|
||||
userObj *user.User
|
||||
expectedUID string
|
||||
expectedGID string
|
||||
}{
|
||||
{
|
||||
user: "root",
|
||||
userObj: &user.User{Uid: "root", Gid: "root"},
|
||||
expectedUID: "root",
|
||||
},
|
||||
{
|
||||
user: "root-add",
|
||||
userObj: &user.User{Uid: "root-add", Gid: "root"},
|
||||
expectedUID: "root-add",
|
||||
},
|
||||
{
|
||||
user: "0",
|
||||
userObj: &user.User{Uid: "0", Gid: "0"},
|
||||
expectedUID: "0",
|
||||
},
|
||||
{
|
||||
user: "fakeUser",
|
||||
userObj: &user.User{Uid: "fakeUser", Gid: "fakeUser"},
|
||||
expectedUID: "fakeUser",
|
||||
},
|
||||
{
|
||||
user: "root:root",
|
||||
expectedUID: "root:root",
|
||||
},
|
||||
{
|
||||
user: "0:root",
|
||||
expectedUID: "0:root",
|
||||
},
|
||||
{
|
||||
user: "root:0",
|
||||
expectedUID: "root:0",
|
||||
},
|
||||
{
|
||||
user: "0:0",
|
||||
expectedUID: "0:0",
|
||||
},
|
||||
{
|
||||
user: "$envuser",
|
||||
user: "root",
|
||||
userObj: &user.User{Uid: "root", Gid: "some"},
|
||||
expectedUID: "root",
|
||||
},
|
||||
{
|
||||
user: "root:$envgroup",
|
||||
expectedUID: "root:root",
|
||||
user: "0",
|
||||
userObj: &user.User{Uid: "0"},
|
||||
expectedUID: "0",
|
||||
},
|
||||
{
|
||||
user: "root",
|
||||
userObj: &user.User{Uid: "root"},
|
||||
expectedUID: "root",
|
||||
expectedGID: "f0",
|
||||
},
|
||||
{
|
||||
user: "0",
|
||||
userObj: &user.User{Uid: "0"},
|
||||
expectedUID: "0",
|
||||
},
|
||||
{
|
||||
user: "$envuser",
|
||||
userObj: &user.User{Uid: "root", Gid: "root"},
|
||||
expectedUID: "root",
|
||||
},
|
||||
{
|
||||
user: "root",
|
||||
userObj: &user.User{Uid: "root"},
|
||||
expectedUID: "root",
|
||||
},
|
||||
{
|
||||
user: "some",
|
||||
userObj: &user.User{Uid: "some"},
|
||||
expectedUID: "some",
|
||||
},
|
||||
{
|
||||
user: "some",
|
||||
expectedUID: "some",
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -76,7 +101,7 @@ func TestUpdateUser(t *testing.T) {
|
|||
cfg := &v1.Config{
|
||||
Env: []string{
|
||||
"envuser=root",
|
||||
"envgroup=root",
|
||||
"envgroup=grp",
|
||||
},
|
||||
}
|
||||
cmd := UserCommand{
|
||||
|
|
@ -84,6 +109,13 @@ func TestUpdateUser(t *testing.T) {
|
|||
User: test.user,
|
||||
},
|
||||
}
|
||||
Lookup = func(_ string) (*user.User, error) {
|
||||
if test.userObj != nil {
|
||||
return test.userObj, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error while looking up user")
|
||||
}
|
||||
defer func() { Lookup = util.Lookup }()
|
||||
buildArgs := dockerfile.NewBuildArgs([]string{})
|
||||
err := cmd.ExecuteCommand(cfg, buildArgs)
|
||||
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedUID, cfg.User)
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ limitations under the License.
package config

import (
"fmt"
"strings"

"github.com/sirupsen/logrus"

@ -39,6 +40,7 @@ func (b *multiArg) Set(value string) error {
return nil
}

// The third is Type() string
func (b *multiArg) Type() string {
return "multi-arg type"
}

@ -51,3 +53,32 @@ func (b *multiArg) Contains(v string) bool {
}
return false
}

// This type is used to supported passing in multiple key=value flags
type keyValueArg map[string]string

// Now, for our new type, implement the two methods of
// the flag.Value interface...
// The first method is String() string
func (a *keyValueArg) String() string {
var result []string
for key := range *a {
result = append(result, fmt.Sprintf("%s=%s", key, (*a)[key]))
}
return strings.Join(result, ",")
}

// The second method is Set(value string) error
func (a *keyValueArg) Set(value string) error {
valueSplit := strings.SplitN(value, "=", 2)
if len(valueSplit) < 2 {
return fmt.Errorf("invalid argument value. expect key=value, got %s", value)
}
(*a)[valueSplit[0]] = valueSplit[1]
return nil
}

// The third is Type() string
func (a *keyValueArg) Type() string {
return "key-value-arg type"
}
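keyValueArg implements the same String/Set/Type trio as multiArg, which is the shape of the pflag.Value interface used by cobra-style CLIs; String and Set alone already satisfy the standard library's flag.Value. A small sketch of a map-backed, repeatable key=value flag in the same style, registered with the standard flag package; the type and flag names below are examples only, not kaniko's.

```go
// Illustrative sketch (not from the diff): a repeatable key=value flag backed
// by a map, in the same style as keyValueArg.
package main

import (
	"flag"
	"fmt"
	"strings"
)

type kvArg map[string]string

func (a kvArg) String() string { return fmt.Sprint(map[string]string(a)) }

func (a kvArg) Set(value string) error {
	parts := strings.SplitN(value, "=", 2)
	if len(parts) < 2 {
		return fmt.Errorf("expected key=value, got %s", value)
	}
	a[parts[0]] = parts[1]
	return nil
}

func main() {
	certs := kvArg{}
	// e.g. --registry-certificate my.registry=/certs/my.crt (repeatable)
	flag.Var(certs, "registry-certificate", "certificate for a registry, key=value, repeatable")
	flag.Parse()
	fmt.Println(certs)
}
```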
@ -0,0 +1,47 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestMultiArg_Set_shouldAppendValue(t *testing.T) {
|
||||
var arg multiArg
|
||||
arg.Set("value1")
|
||||
if len(arg) != 1 || arg[0] != "value1" {
|
||||
t.Error("Fist value was not appended")
|
||||
}
|
||||
arg.Set("value2")
|
||||
if len(arg) != 2 || arg[1] != "value2" {
|
||||
t.Error("Second value was not appended")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_KeyValueArg_Set_shouldSplitArgument(t *testing.T) {
|
||||
arg := make(keyValueArg)
|
||||
arg.Set("key=value")
|
||||
if arg["key"] != "value" {
|
||||
t.Error("Invalid split. key=value should be split to key=>value")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_KeyValueArg_Set_shouldAcceptEqualAsValue(t *testing.T) {
|
||||
arg := make(keyValueArg)
|
||||
arg.Set("key=value=something")
|
||||
if arg["key"] != "value=something" {
|
||||
t.Error("Invalid split. key=value=something should be split to key=>value=something")
|
||||
}
|
||||
}
|
||||
|
|
@ -39,8 +39,13 @@ type KanikoOptions struct {
DigestFile string
ImageNameDigestFile string
OCILayoutPath string
RegistryMirror string
Destinations multiArg
BuildArgs multiArg
InsecureRegistries multiArg
Labels multiArg
SkipTLSVerifyRegistries multiArg
RegistriesCertificates keyValueArg
Insecure bool
SkipTLSVerify bool
InsecurePull bool

@ -50,8 +55,7 @@ type KanikoOptions struct {
NoPush bool
Cache bool
Cleanup bool
InsecureRegistries multiArg
SkipTLSVerifyRegistries multiArg
WhitelistVarRun bool
}

// WarmerOptions are options that are set by command line arguments to the cache warmer.
@ -17,9 +17,6 @@ limitations under the License.
package constants

const (
// DefaultLogLevel is the default log level
DefaultLogLevel = "info"

// RootDir is the path to the root directory
RootDir = "/"
|
|
|
|||
|
|
@ -90,14 +90,17 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
// baseImageIndex returns the index of the stage the current stage is built off
// returns -1 if the current stage isn't built off a previous stage
func baseImageIndex(currentStage int, stages []instructions.Stage) int {
currentStageBaseName := strings.ToLower(stages[currentStage].BaseName)

for i, stage := range stages {
if i > currentStage {
break
}
if stage.Name == stages[currentStage].BaseName {
if stage.Name == currentStageBaseName {
return i
}
}

return -1
}

@ -245,15 +248,19 @@ func ParseCommands(cmdArray []string) ([]instructions.Command, error) {

// SaveStage returns true if the current stage will be needed later in the Dockerfile
func saveStage(index int, stages []instructions.Stage) bool {
currentStageName := stages[index].Name

for stageIndex, stage := range stages {
if stageIndex <= index {
continue
}
if stage.BaseName == stages[index].Name {

if strings.ToLower(stage.BaseName) == currentStageName {
if stage.BaseName != "" {
return true
}
}
}

return false
}
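Both helpers now compare through strings.ToLower, so a later `FROM` that refers to an earlier stage keeps matching even when the Dockerfile spells the stage name with different casing (the assumption here, inferred from the diff, is that parsed stage names are stored lower-cased while BaseName keeps the original spelling). A self-contained sketch of the saveStage-style check, with a local stand-in for instructions.Stage:

```go
// Illustrative sketch (not from the diff): case-insensitive "is this stage
// used as a base later?" check, with a local struct standing in for
// instructions.Stage.
package main

import (
	"fmt"
	"strings"
)

type stage struct {
	Name     string // parsed stage name, e.g. "builder"
	BaseName string // the FROM argument as written, e.g. "Builder"
}

func laterStageDependsOn(index int, stages []stage) bool {
	current := stages[index].Name
	for i, s := range stages {
		if i <= index {
			continue
		}
		if s.BaseName != "" && strings.ToLower(s.BaseName) == current {
			return true
		}
	}
	return false
}

func main() {
	stages := []stage{
		{Name: "builder", BaseName: "golang:1.14"},
		{Name: "", BaseName: "Builder"}, // FROM Builder, different case
	}
	fmt.Println(laterStageDependsOn(0, stages)) // true
}
```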
@ -20,11 +20,11 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
otiai10Cpy "github.com/otiai10/copy"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
|
|
@ -74,7 +74,7 @@ type stageBuilder struct {
|
|||
stageIdxToDigest map[string]string
|
||||
snapshotter snapShotter
|
||||
layerCache cache.LayerCache
|
||||
pushCache cachePusher
|
||||
pushLayerToCache cachePusher
|
||||
}
|
||||
|
||||
// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
|
||||
|
|
@ -84,7 +84,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
|
|||
return nil, err
|
||||
}
|
||||
|
||||
imageConfig, err := initializeConfig(sourceImage)
|
||||
imageConfig, err := initializeConfig(sourceImage, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -117,7 +117,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
|
|||
layerCache: &cache.RegistryCache{
|
||||
Opts: opts,
|
||||
},
|
||||
pushCache: pushLayerToCache,
|
||||
pushLayerToCache: pushLayerToCache,
|
||||
}
|
||||
|
||||
for _, cmd := range s.stage.Commands {
|
||||
|
|
@ -136,7 +136,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
|
|||
return s, nil
|
||||
}
|
||||
|
||||
func initializeConfig(img partial.WithConfigFile) (*v1.ConfigFile, error) {
|
||||
func initializeConfig(img partial.WithConfigFile, opts *config.KanikoOptions) (*v1.ConfigFile, error) {
|
||||
imageConfig, err := img.ConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -145,12 +145,37 @@ func initializeConfig(img partial.WithConfigFile) (*v1.ConfigFile, error) {
if imageConfig.Config.Env == nil {
imageConfig.Config.Env = constants.ScratchEnvVars
}

if opts == nil {
return imageConfig, nil
}

if l := len(opts.Labels); l > 0 {
if imageConfig.Config.Labels == nil {
imageConfig.Config.Labels = make(map[string]string)
}
for _, label := range opts.Labels {
parts := strings.SplitN(label, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("labels must be of the form key=value, got %s", label)
}

imageConfig.Config.Labels[parts[0]] = parts[1]
}
}

return imageConfig, nil
}

func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache) (CompositeCache, error) {
func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache, args *dockerfile.BuildArgs, env []string) (CompositeCache, error) {
// First replace all the environment variables or args in the command
replacementEnvs := args.ReplacementEnvs(env)
resolvedCmd, err := util.ResolveEnvironmentReplacement(command.String(), replacementEnvs, false)
if err != nil {
return compositeKey, err
}
// Add the next command to the cache key.
compositeKey.AddKey(command.String())
compositeKey.AddKey(resolvedCmd)
switch v := command.(type) {
case *commands.CopyCommand:
compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)

@ -158,8 +183,10 @@ func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string
compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
}

srcCtx := s.opts.SrcContext

for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
if err := compositeKey.AddPath(f, srcCtx); err != nil {
return compositeKey, err
}
}
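populateCompositeKey now hashes the command after env/arg substitution, which is why a --build-arg that a command never references no longer changes that command's cache key, while a command that does use the arg still gets a distinct key. A minimal sketch of the idea, hashing the resolved string with SHA-256; os.Expand stands in for util.ResolveEnvironmentReplacement and the CompositeCache machinery here.

```go
// Illustrative sketch (not from the diff): cache keys computed over the
// *resolved* command, so unused build args stop invalidating the cache.
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

func cacheKey(command string, args map[string]string) string {
	resolved := os.Expand(command, func(k string) string { return args[k] })
	sum := sha256.Sum256([]byte(resolved))
	return fmt.Sprintf("%x", sum)
}

func main() {
	a := cacheKey("RUN echo const > test", map[string]string{"ARG": "foo"})
	b := cacheKey("RUN echo const > test", map[string]string{"ARG": "bar"})
	fmt.Println(a == b) // true: the arg is never referenced

	c := cacheKey("RUN echo $ARG > test", map[string]string{"ARG": "foo"})
	d := cacheKey("RUN echo $ARG > test", map[string]string{"ARG": "bar"})
	fmt.Println(c == d) // false: the resolved commands differ
}
```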
@ -201,7 +228,7 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
|
|||
return errors.Wrap(err, "failed to get files used from context")
|
||||
}
|
||||
|
||||
compositeKey, err = s.populateCompositeKey(command, files, compositeKey)
|
||||
compositeKey, err = s.populateCompositeKey(command, files, compositeKey, s.args, cfg.Env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -251,8 +278,6 @@ func (s *stageBuilder) build() error {
|
|||
compositeKey = NewCompositeCache(s.baseImageDigest)
|
||||
}
|
||||
|
||||
compositeKey.AddKey(s.opts.BuildArgs...)
|
||||
|
||||
// Apply optimizations to the instructions.
|
||||
if err := s.optimize(*compositeKey, s.cf.Config); err != nil {
|
||||
return errors.Wrap(err, "failed to optimize instructions")
|
||||
|
|
@ -309,7 +334,7 @@ func (s *stageBuilder) build() error {
|
|||
return errors.Wrap(err, "failed to get files used from context")
|
||||
}
|
||||
|
||||
*compositeKey, err = s.populateCompositeKey(command, files, *compositeKey)
|
||||
*compositeKey, err = s.populateCompositeKey(command, files, *compositeKey, s.args, s.cf.Config.Env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -326,29 +351,47 @@ func (s *stageBuilder) build() error {
|
|||
continue
|
||||
}
|
||||
|
||||
tarPath, err := s.takeSnapshot(files)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to take snapshot")
|
||||
fn := func() bool {
|
||||
switch v := command.(type) {
|
||||
case commands.Cached:
|
||||
return v.ReadSuccess()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
|
||||
ck, err := compositeKey.Hash()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to hash composite key")
|
||||
}
|
||||
if fn() {
|
||||
v := command.(commands.Cached)
|
||||
layer := v.Layer()
|
||||
if err := s.saveLayerToImage(layer, command.String()); err != nil {
|
||||
return errors.Wrap(err, "failed to save layer")
|
||||
}
|
||||
} else {
|
||||
tarPath, err := s.takeSnapshot(files)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to take snapshot")
|
||||
}
|
||||
|
||||
logrus.Debugf("build: cache key for command %v %v", command.String(), ck)
|
||||
logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
|
||||
ck, err := compositeKey.Hash()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to hash composite key")
|
||||
}
|
||||
|
||||
// Push layer to cache (in parallel) now along with new config file
|
||||
if s.opts.Cache && command.ShouldCacheOutput() {
|
||||
cacheGroup.Go(func() error {
|
||||
return s.pushCache(s.opts, ck, tarPath, command.String())
|
||||
})
|
||||
}
|
||||
if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
|
||||
return errors.Wrap(err, "failed to save snapshot to image")
|
||||
logrus.Debugf("build: cache key for command %v %v", command.String(), ck)
|
||||
|
||||
// Push layer to cache (in parallel) now along with new config file
|
||||
if s.opts.Cache && command.ShouldCacheOutput() {
|
||||
cacheGroup.Go(func() error {
|
||||
return s.pushLayerToCache(s.opts, ck, tarPath, command.String())
|
||||
})
|
||||
}
|
||||
if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
|
||||
return errors.Wrap(err, "failed to save snapshot to image")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := cacheGroup.Wait(); err != nil {
|
||||
logrus.Warnf("error uploading layer to cache: %s", err)
|
||||
}
|
||||
|
|
@ -398,22 +441,40 @@ func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
|
|||
}
|
||||
|
||||
func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error {
|
||||
if tarPath == "" {
|
||||
layer, err := s.saveSnapshotToLayer(tarPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if layer == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return s.saveLayerToImage(layer, createdBy)
|
||||
}
|
||||
|
||||
func (s *stageBuilder) saveSnapshotToLayer(tarPath string) (v1.Layer, error) {
|
||||
if tarPath == "" {
|
||||
return nil, nil
|
||||
}
|
||||
fi, err := os.Stat(tarPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "tar file path does not exist")
|
||||
return nil, errors.Wrap(err, "tar file path does not exist")
|
||||
}
|
||||
if fi.Size() <= emptyTarSize {
|
||||
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
layer, err := tarball.LayerFromFile(tarPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return layer, nil
|
||||
}
|
||||
func (s *stageBuilder) saveLayerToImage(layer v1.Layer, createdBy string) error {
|
||||
var err error
|
||||
s.image, err = mutate.Append(s.image,
|
||||
mutate.Addendum{
|
||||
Layer: layer,
|
||||
|
|
@ -448,7 +509,7 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error)
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
cfg, err := initializeConfig(image)
|
||||
cfg, err := initializeConfig(image, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -529,6 +590,17 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
configFile, err := sourceImage.ConfigFile()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configFile.OS = runtime.GOOS
|
||||
configFile.Architecture = runtime.GOARCH
|
||||
sourceImage, err = mutate.ConfigFile(sourceImage, configFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, err := sourceImage.Digest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -574,8 +646,10 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}
for _, p := range filesToSave {
logrus.Infof("Saving file %s for later use.", p)
otiai10Cpy.Copy(p, filepath.Join(dstDir, p))
logrus.Infof("Saving file %s for later use", p)
if err := util.CopyFileOrSymlink(p, dstDir); err != nil {
return nil, err
}
}

// Delete the filesystem

@ -587,16 +661,23 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
return nil, err
}

// fileToSave returns all the files matching the given pattern in deps.
// If a file is a symlink, it also returns the target file.
func filesToSave(deps []string) ([]string, error) {
allFiles := []string{}
srcFiles := []string{}
for _, src := range deps {
srcs, err := filepath.Glob(src)
if err != nil {
return nil, err
}
allFiles = append(allFiles, srcs...)
for _, f := range srcs {
if link, err := util.EvalSymLink(f); err == nil {
srcFiles = append(srcFiles, link)
}
srcFiles = append(srcFiles, f)
}
}
return allFiles, nil
return srcFiles, nil
}

func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error {
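filesToSave now returns each matched path together with its resolved symlink target, so copying the saved dependencies into the per-stage directory no longer leaves dangling links. A small stand-alone sketch of the same glob-and-resolve loop; filepath.EvalSymlinks is used here as a stand-in for util.EvalSymLink, and the pattern in main is only an example.

```go
// Illustrative sketch (not from the diff): expand dependency patterns and
// keep both each match and its resolved symlink target.
package main

import (
	"fmt"
	"path/filepath"
)

func expandDeps(patterns []string) ([]string, error) {
	out := []string{}
	for _, pattern := range patterns {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			return nil, err
		}
		for _, m := range matches {
			if target, err := filepath.EvalSymlinks(m); err == nil && target != m {
				out = append(out, target) // keep the link's target too
			}
			out = append(out, m)
		}
	}
	return out, nil
}

func main() {
	files, err := expandDeps([]string{"/etc/ssl/certs/*.pem"})
	fmt.Println(len(files), err)
}
```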
@ -684,7 +765,7 @@ func getHasher(snapshotMode string) (func(string) (string, error), error) {
}

func resolveOnBuild(stage *config.KanikoStage, config *v1.Config) error {
if config.OnBuild == nil {
if config.OnBuild == nil || len(config.OnBuild) == 0 {
return nil
}
// Otherwise, parse into commands
|
|
|
|||
|
|
@ -462,7 +462,7 @@ func TestInitializeConfig(t *testing.T) {
|
|||
t.Errorf("error seen when running test %s", err)
|
||||
t.Fail()
|
||||
}
|
||||
actual, _ := initializeConfig(img)
|
||||
actual, _ := initializeConfig(img, nil)
|
||||
testutil.CheckDeepEqual(t, tt.expected, actual.Config)
|
||||
}
|
||||
}
|
||||
|
|
@ -497,7 +497,8 @@ func Test_stageBuilder_optimize(t *testing.T) {
|
|||
cf := &v1.ConfigFile{}
|
||||
snap := fakeSnapShotter{}
|
||||
lc := &fakeLayerCache{retrieve: tc.retrieve}
|
||||
sb := &stageBuilder{opts: tc.opts, cf: cf, snapshotter: snap, layerCache: lc}
|
||||
sb := &stageBuilder{opts: tc.opts, cf: cf, snapshotter: snap, layerCache: lc,
|
||||
args: dockerfile.NewBuildArgs([]string{})}
|
||||
ck := CompositeCache{}
|
||||
file, err := ioutil.TempFile("", "foo")
|
||||
if err != nil {
|
||||
|
|
@ -517,10 +518,135 @@ func Test_stageBuilder_optimize(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
type stageContext struct {
|
||||
command fmt.Stringer
|
||||
args *dockerfile.BuildArgs
|
||||
env []string
|
||||
}
|
||||
|
||||
func newStageContext(command string, args map[string]string, env []string) stageContext {
|
||||
dockerArgs := dockerfile.NewBuildArgs([]string{})
|
||||
for k, v := range args {
|
||||
dockerArgs.AddArg(k, &v)
|
||||
}
|
||||
return stageContext{MockDockerCommand{command: command}, dockerArgs, env}
|
||||
}
|
||||
|
||||
func Test_stageBuilder_populateCompositeKey(t *testing.T) {
|
||||
testCases := []struct {
|
||||
description string
|
||||
cmd1 stageContext
|
||||
cmd2 stageContext
|
||||
shdEqual bool
|
||||
}{
|
||||
{
|
||||
description: "cache key for same command, different buildargs, args not used in command",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo const > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=foo1"},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo const > test",
|
||||
map[string]string{"ARG": "bar"},
|
||||
[]string{"ENV=bar1"},
|
||||
),
|
||||
shdEqual: true,
|
||||
},
|
||||
{
|
||||
description: "cache key for same command with same build args",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo $ARG > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo $ARG > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{},
|
||||
),
|
||||
shdEqual: true,
|
||||
},
|
||||
{
|
||||
description: "cache key for same command with same env",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo $ENV > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=same"},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo $ENV > test",
|
||||
map[string]string{"ARG": "bar"},
|
||||
[]string{"ENV=same"},
|
||||
),
|
||||
shdEqual: true,
|
||||
},
|
||||
{
|
||||
description: "cache key for same command with a build arg values",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo $ARG > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo $ARG > test",
|
||||
map[string]string{"ARG": "bar"},
|
||||
[]string{},
|
||||
),
|
||||
},
|
||||
{
|
||||
description: "cache key for same command with different env values",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo $ENV > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=1"},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo $ENV > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=2"},
|
||||
),
|
||||
},
|
||||
{
|
||||
description: "cache key for different command same context",
|
||||
cmd1: newStageContext(
|
||||
"RUN echo other > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=1"},
|
||||
),
|
||||
cmd2: newStageContext(
|
||||
"RUN echo another > test",
|
||||
map[string]string{"ARG": "foo"},
|
||||
[]string{"ENV=1"},
|
||||
),
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
sb := &stageBuilder{opts: &config.KanikoOptions{SrcContext: "workspace"}}
|
||||
ck := CompositeCache{}
|
||||
|
||||
ck1, err := sb.populateCompositeKey(tc.cmd1.command, []string{}, ck, tc.cmd1.args, tc.cmd1.env)
|
||||
if err != nil {
|
||||
t.Errorf("Expected error to be nil but was %v", err)
|
||||
}
|
||||
ck2, err := sb.populateCompositeKey(tc.cmd2.command, []string{}, ck, tc.cmd2.args, tc.cmd2.env)
|
||||
if err != nil {
|
||||
t.Errorf("Expected error to be nil but was %v", err)
|
||||
}
|
||||
key1, key2 := hashCompositeKeys(t, ck1, ck2)
|
||||
if b := key1 == key2; b != tc.shdEqual {
|
||||
t.Errorf("expected keys to be equal as %t but found %t", tc.shdEqual, !tc.shdEqual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_stageBuilder_build(t *testing.T) {
|
||||
type testcase struct {
|
||||
description string
|
||||
opts *config.KanikoOptions
|
||||
args map[string]string
|
||||
layerCache *fakeLayerCache
|
||||
expectedCacheKeys []string
|
||||
pushedCacheKeys []string
|
||||
|
|
@ -538,12 +664,13 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
filePath := filepath.Join(dir, file)
|
||||
ch := NewCompositeCache("", "meow")
|
||||
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
command := MockDockerCommand{
|
||||
command: "meow",
|
||||
contextFiles: []string{filePath},
|
||||
cacheCommand: MockCachedDockerCommand{
|
||||
contextFiles: []string{filePath},
|
||||
|
|
@ -570,12 +697,13 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
filePath := filepath.Join(dir, file)
|
||||
ch := NewCompositeCache("", "meow")
|
||||
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
command := MockDockerCommand{
|
||||
command: "meow",
|
||||
contextFiles: []string{filePath},
|
||||
cacheCommand: MockCachedDockerCommand{
|
||||
contextFiles: []string{filePath},
|
||||
|
|
@ -618,7 +746,7 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
tarContent := generateTar(t, dir, filename)
|
||||
|
||||
ch := NewCompositeCache("", "")
|
||||
ch.AddPath(filepath)
|
||||
ch.AddPath(filepath, "")
|
||||
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
|
|
@ -662,7 +790,7 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
}
|
||||
filePath := filepath.Join(dir, filename)
|
||||
ch := NewCompositeCache("", "")
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
|
|
@ -713,7 +841,7 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
}
|
||||
|
||||
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
hash2, err := ch.Hash()
|
||||
if err != nil {
|
||||
|
|
@ -721,7 +849,7 @@ func Test_stageBuilder_build(t *testing.T) {
|
|||
}
|
||||
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
|
||||
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
image := fakeImage{
|
||||
ImageLayers: []v1.Layer{
|
||||
|
|
@ -777,14 +905,14 @@ COPY %s bar.txt
|
|||
}
|
||||
filePath := filepath.Join(dir, filename)
|
||||
ch := NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
hash1, err := ch.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
hash2, err := ch.Hash()
|
||||
if err != nil {
|
||||
|
|
@ -792,7 +920,7 @@ COPY %s bar.txt
|
|||
}
|
||||
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
|
||||
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
|
||||
ch.AddPath(filePath)
|
||||
ch.AddPath(filePath, "")
|
||||
|
||||
image := fakeImage{
|
||||
ImageLayers: []v1.Layer{
|
||||
|
|
@ -838,6 +966,117 @@ COPY %s bar.txt
|
|||
commands: getCommands(dir, cmds),
|
||||
}
|
||||
}(),
|
||||
func() testcase {
|
||||
dir, _ := tempDirAndFile(t)
|
||||
ch := NewCompositeCache("")
|
||||
ch.AddKey("RUN foobar")
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
|
||||
command := MockDockerCommand{
|
||||
command: "RUN foobar",
|
||||
contextFiles: []string{},
|
||||
cacheCommand: MockCachedDockerCommand{
|
||||
contextFiles: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
return testcase{
|
||||
description: "cached run command with no build arg value used uses cached layer and does not push anything",
|
||||
config: &v1.ConfigFile{Config: v1.Config{WorkingDir: dir}},
|
||||
opts: &config.KanikoOptions{Cache: true},
|
||||
args: map[string]string{
|
||||
"test": "value",
|
||||
},
|
||||
expectedCacheKeys: []string{hash},
|
||||
commands: []commands.DockerCommand{command},
|
||||
// layer key needs to be read.
|
||||
layerCache: &fakeLayerCache{
|
||||
img: &fakeImage{ImageLayers: []v1.Layer{fakeLayer{}}},
|
||||
keySequence: []string{hash},
|
||||
},
|
||||
rootDir: dir,
|
||||
}
|
||||
}(),
|
||||
func() testcase {
|
||||
dir, _ := tempDirAndFile(t)
|
||||
|
||||
ch := NewCompositeCache("")
|
||||
ch.AddKey("RUN value")
|
||||
hash, err := ch.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
|
||||
command := MockDockerCommand{
|
||||
command: "RUN $arg",
|
||||
contextFiles: []string{},
|
||||
cacheCommand: MockCachedDockerCommand{
|
||||
contextFiles: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
return testcase{
|
||||
description: "cached run command with same build arg does not push layer",
|
||||
config: &v1.ConfigFile{Config: v1.Config{WorkingDir: dir}},
|
||||
opts: &config.KanikoOptions{Cache: true},
|
||||
args: map[string]string{
|
||||
"arg": "value",
|
||||
},
|
||||
// layer key that exists
|
||||
layerCache: &fakeLayerCache{
|
||||
img: &fakeImage{ImageLayers: []v1.Layer{fakeLayer{}}},
|
||||
keySequence: []string{hash},
|
||||
},
|
||||
expectedCacheKeys: []string{hash},
|
||||
commands: []commands.DockerCommand{command},
|
||||
rootDir: dir,
|
||||
}
|
||||
}(),
|
||||
func() testcase {
|
||||
dir, _ := tempDirAndFile(t)
|
||||
|
||||
ch1 := NewCompositeCache("")
|
||||
ch1.AddKey("RUN value")
|
||||
hash1, err := ch1.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
|
||||
ch2 := NewCompositeCache("")
|
||||
ch2.AddKey("RUN anotherValue")
|
||||
hash2, err := ch2.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("couldn't create hash %v", err)
|
||||
}
|
||||
command := MockDockerCommand{
|
||||
command: "RUN $arg",
|
||||
contextFiles: []string{},
|
||||
cacheCommand: MockCachedDockerCommand{
|
||||
contextFiles: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
return testcase{
|
||||
description: "cached run command with another build arg pushes layer",
|
||||
config: &v1.ConfigFile{Config: v1.Config{WorkingDir: dir}},
|
||||
opts: &config.KanikoOptions{Cache: true},
|
||||
args: map[string]string{
|
||||
"arg": "anotherValue",
|
||||
},
|
||||
// layer for arg=value already exists
|
||||
layerCache: &fakeLayerCache{
|
||||
img: &fakeImage{ImageLayers: []v1.Layer{fakeLayer{}}},
|
||||
keySequence: []string{hash1},
|
||||
},
|
||||
expectedCacheKeys: []string{hash2},
|
||||
pushedCacheKeys: []string{hash2},
|
||||
commands: []commands.DockerCommand{command},
|
||||
rootDir: dir,
|
||||
}
|
||||
}(),
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.description, func(t *testing.T) {
|
||||
|
|
@ -875,18 +1114,21 @@ COPY %s bar.txt
|
|||
}
|
||||
keys := []string{}
|
||||
sb := &stageBuilder{
|
||||
args: &dockerfile.BuildArgs{}, //required or code will panic
|
||||
args: dockerfile.NewBuildArgs([]string{}), //required or code will panic
|
||||
image: tc.image,
|
||||
opts: tc.opts,
|
||||
cf: cf,
|
||||
snapshotter: snap,
|
||||
layerCache: lc,
|
||||
pushCache: func(_ *config.KanikoOptions, cacheKey, _, _ string) error {
|
||||
pushLayerToCache: func(_ *config.KanikoOptions, cacheKey, _, _ string) error {
|
||||
keys = append(keys, cacheKey)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
sb.cmds = tc.commands
|
||||
for key, value := range tc.args {
|
||||
sb.args.AddArg(key, &value)
|
||||
}
|
||||
tmp := commands.RootDir
|
||||
if tc.rootDir != "" {
|
||||
commands.RootDir = tc.rootDir
|
||||
|
|
@ -993,3 +1235,15 @@ func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
|
|||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func hashCompositeKeys(t *testing.T, ck1 CompositeCache, ck2 CompositeCache) (string, string) {
|
||||
key1, err := ck1.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("could not hash composite key due to %s", err)
|
||||
}
|
||||
key2, err := ck2.Hash()
|
||||
if err != nil {
|
||||
t.Errorf("could not hash composite key due to %s", err)
|
||||
}
|
||||
return key1, key2
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,6 +24,7 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewCompositeCache returns an initialized composite cache object.
|
||||
|
|
@ -54,18 +55,28 @@ func (s *CompositeCache) Hash() (string, error) {
|
|||
return util.SHA256(strings.NewReader(s.Key()))
|
||||
}
|
||||
|
||||
func (s *CompositeCache) AddPath(p string) error {
|
||||
func (s *CompositeCache) AddPath(p, context string) error {
|
||||
sha := sha256.New()
|
||||
fi, err := os.Lstat(p)
|
||||
if err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "could not add path")
|
||||
}
|
||||
|
||||
if fi.Mode().IsDir() {
|
||||
k, err := HashDir(p)
|
||||
empty, k, err := hashDir(p, context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.keys = append(s.keys, k)
|
||||
|
||||
// Only add the hash of this directory to the key
|
||||
// if there is any whitelisted content.
|
||||
if !empty || !util.ExcludeFile(p, context) {
|
||||
s.keys = append(s.keys, k)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if util.ExcludeFile(p, context) {
|
||||
return nil
|
||||
}
|
||||
fh, err := util.CacheHasher()(p)
|
||||
|
|
@ -81,12 +92,18 @@ func (s *CompositeCache) AddPath(p string) error {
|
|||
}
|
||||
|
||||
// HashDir returns a hash of the directory.
|
||||
func HashDir(p string) (string, error) {
|
||||
func hashDir(p, context string) (bool, string, error) {
|
||||
sha := sha256.New()
|
||||
empty := true
|
||||
if err := filepath.Walk(p, func(path string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
exclude := util.ExcludeFile(path, context)
|
||||
if exclude {
|
||||
return nil
|
||||
}
|
||||
|
||||
fileHash, err := util.CacheHasher()(path)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -94,10 +111,11 @@ func HashDir(p string) (string, error) {
|
|||
if _, err := sha.Write([]byte(fileHash)); err != nil {
|
||||
return err
|
||||
}
|
||||
empty = false
|
||||
return nil
|
||||
}); err != nil {
|
||||
return "", err
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", sha.Sum(nil)), nil
|
||||
return empty, fmt.Sprintf("%x", sha.Sum(nil)), nil
|
||||
}
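
The hunk above changes AddPath to take the build-context root as a second argument so .dockerignore exclusions can be applied while hashing. Below is a minimal sketch of how the two-argument form might be called from outside the package; the import path, the command strings, and the /workspace paths are assumptions for illustration, not taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/executor"
)

func main() {
	// Seed the composite key with command strings, then mix in the files the
	// command depends on. "/workspace" stands in for the build-context root;
	// files excluded by its .dockerignore no longer affect the key.
	ch := executor.NewCompositeCache("FROM alpine", "COPY src/ /app/")
	if err := ch.AddPath("/workspace/src", "/workspace"); err != nil {
		log.Fatal(err)
	}
	key, err := ch.Hash()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer cache key:", key)
}
```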
|
||||
|
|
|
|||
|
|
@ -19,9 +19,12 @@ package executor
|
|||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
)
|
||||
|
||||
func Test_NewCompositeCache(t *testing.T) {
|
||||
|
|
@ -77,7 +80,7 @@ func Test_CompositeCache_AddPath_dir(t *testing.T) {
|
|||
|
||||
fn := func() string {
|
||||
r := NewCompositeCache()
|
||||
if err := r.AddPath(tmpDir); err != nil {
|
||||
if err := r.AddPath(tmpDir, ""); err != nil {
|
||||
t.Errorf("expected error to be nil but was %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -115,7 +118,7 @@ func Test_CompositeCache_AddPath_file(t *testing.T) {
|
|||
p := tmpfile.Name()
|
||||
fn := func() string {
|
||||
r := NewCompositeCache()
|
||||
if err := r.AddPath(p); err != nil {
|
||||
if err := r.AddPath(p, ""); err != nil {
|
||||
t.Errorf("expected error to be nil but was %v", err)
|
||||
}
|
||||
|
||||
|
|
@ -135,3 +138,433 @@ func Test_CompositeCache_AddPath_file(t *testing.T) {
|
|||
t.Errorf("expected hash %v to equal hash %v", hash1, hash2)
|
||||
}
|
||||
}
|
||||
|
||||
func createFilesystemStructure(root string, directories, files []string) error {
|
||||
for _, d := range directories {
|
||||
dirPath := path.Join(root, d)
|
||||
if err := os.MkdirAll(dirPath, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, fileName := range files {
|
||||
filePath := path.Join(root, fileName)
|
||||
err := ioutil.WriteFile(filePath, []byte(fileName), 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func setIgnoreContext(content string) error {
|
||||
dockerIgnoreDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(dockerIgnoreDir)
|
||||
err = ioutil.WriteFile(dockerIgnoreDir+".dockerignore", []byte(content), 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.GetExcludedFiles(dockerIgnoreDir, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hashDirectory(dirpath string) (string, error) {
|
||||
cache1 := NewCompositeCache()
|
||||
err := cache1.AddPath(dirpath, dirpath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hash, err := cache1.Hash()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func Test_CompositeKey_AddPath_Works(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
directories []string
|
||||
files []string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
directories: []string{},
|
||||
files: []string{},
|
||||
},
|
||||
{
|
||||
name: "dirs",
|
||||
directories: []string{"foo", "bar", "foobar", "f/o/o"},
|
||||
files: []string{},
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
directories: []string{},
|
||||
files: []string{"foo", "bar", "foobar"},
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
directories: []string{"foo", "bar"},
|
||||
files: []string{"foo/bar", "bar/baz", "foobar"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testDir1, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir1)
|
||||
err = createFilesystemStructure(testDir1, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
testDir2, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir2)
|
||||
err = createFilesystemStructure(testDir2, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
hash1, err := hashDirectory(testDir1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
hash2, err := hashDirectory(testDir2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_CompositeKey_AddPath_WithExtraFile_Works(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
directories []string
|
||||
files []string
|
||||
extraFile string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
directories: []string{},
|
||||
files: []string{},
|
||||
extraFile: "file",
|
||||
},
|
||||
{
|
||||
name: "dirs",
|
||||
directories: []string{"foo", "bar", "foobar", "f/o/o"},
|
||||
files: []string{},
|
||||
extraFile: "f/o/o/extra",
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
directories: []string{},
|
||||
files: []string{"foo", "bar", "foobar"},
|
||||
extraFile: "foo.extra",
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
directories: []string{"foo", "bar"},
|
||||
files: []string{"foo/bar", "bar/baz", "foobar"},
|
||||
extraFile: "bar/extra",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testDir1, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir1)
|
||||
err = createFilesystemStructure(testDir1, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
testDir2, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir2)
|
||||
err = createFilesystemStructure(testDir2, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
extraPath := path.Join(testDir2, test.extraFile)
|
||||
err = ioutil.WriteFile(extraPath, []byte(test.extraFile), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
hash1, err := hashDirectory(testDir1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
hash2, err := hashDirectory(testDir2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
|
||||
if hash1 == hash2 {
|
||||
t.Errorf("Expected different hashes, got: %s and %s", hash1, hash2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_CompositeKey_AddPath_WithExtraDir_Works(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
directories []string
|
||||
files []string
|
||||
extraDir string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
directories: []string{},
|
||||
files: []string{},
|
||||
extraDir: "extra",
|
||||
},
|
||||
{
|
||||
name: "dirs",
|
||||
directories: []string{"foo", "bar", "foobar", "f/o/o"},
|
||||
files: []string{},
|
||||
extraDir: "f/o/o/extra",
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
directories: []string{},
|
||||
files: []string{"foo", "bar", "foobar"},
|
||||
extraDir: "foo.extra",
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
directories: []string{"foo", "bar"},
|
||||
files: []string{"foo/bar", "bar/baz", "foobar"},
|
||||
extraDir: "bar/extra",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testDir1, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir1)
|
||||
err = createFilesystemStructure(testDir1, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
testDir2, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir2)
|
||||
err = createFilesystemStructure(testDir2, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
extraPath := path.Join(testDir2, test.extraDir)
|
||||
err = os.MkdirAll(extraPath, 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
hash1, err := hashDirectory(testDir1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
hash2, err := hashDirectory(testDir2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
|
||||
if hash1 == hash2 {
|
||||
t.Errorf("Expected different hashes, got: %s and %s", hash1, hash2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_CompositeKey_AddPath_WithExtraFilIgnored_Works(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
directories []string
|
||||
files []string
|
||||
extraFile string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
directories: []string{},
|
||||
files: []string{},
|
||||
extraFile: "extra",
|
||||
},
|
||||
{
|
||||
name: "dirs",
|
||||
directories: []string{"foo", "bar", "foobar", "f/o/o"},
|
||||
files: []string{},
|
||||
extraFile: "f/o/o/extra",
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
directories: []string{},
|
||||
files: []string{"foo", "bar", "foobar"},
|
||||
extraFile: "extra",
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
directories: []string{"foo", "bar"},
|
||||
files: []string{"foo/bar", "bar/baz", "foobar"},
|
||||
extraFile: "bar/extra",
|
||||
},
|
||||
}
|
||||
|
||||
err := setIgnoreContext("**/extra")
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting exlusion context: %s", err)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testDir1, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir1)
|
||||
err = createFilesystemStructure(testDir1, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
testDir2, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir2)
|
||||
err = createFilesystemStructure(testDir2, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
extraPath := path.Join(testDir2, test.extraFile)
|
||||
err = ioutil.WriteFile(extraPath, []byte(test.extraFile), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
hash1, err := hashDirectory(testDir1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
hash2, err := hashDirectory(testDir2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_CompositeKey_AddPath_WithExtraDirIgnored_Works(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
directories []string
|
||||
files []string
|
||||
extraDir string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
directories: []string{},
|
||||
files: []string{},
|
||||
extraDir: "extra",
|
||||
},
|
||||
{
|
||||
name: "dirs",
|
||||
directories: []string{"foo", "bar", "foobar", "f/o/o"},
|
||||
files: []string{},
|
||||
extraDir: "f/o/o/extra",
|
||||
},
|
||||
{
|
||||
name: "files",
|
||||
directories: []string{},
|
||||
files: []string{"foo", "bar", "foobar"},
|
||||
extraDir: "extra",
|
||||
},
|
||||
{
|
||||
name: "all",
|
||||
directories: []string{"foo", "bar"},
|
||||
files: []string{"foo/bar", "bar/baz", "foobar"},
|
||||
extraDir: "bar/extra",
|
||||
},
|
||||
}
|
||||
|
||||
err := setIgnoreContext("**/extra")
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting exlusion context: %s", err)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testDir1, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir1)
|
||||
err = createFilesystemStructure(testDir1, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
testDir2, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating tempdir: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(testDir2)
|
||||
err = createFilesystemStructure(testDir2, test.directories, test.files)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
extraPath := path.Join(testDir2, test.extraDir)
|
||||
err = os.MkdirAll(extraPath, 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating filesytem structure: %s", err)
|
||||
}
|
||||
|
||||
hash1, err := hashDirectory(testDir1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
hash2, err := hashDirectory(testDir2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to calculate hash: %s", err)
|
||||
}
|
||||
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("Expected equal hashes, got: %s and %s", hash1, hash2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -43,13 +43,14 @@ func (f fakeSnapShotter) TakeSnapshot(_ []string) (string, error) {
|
|||
}
|
||||
|
||||
type MockDockerCommand struct {
|
||||
command string
|
||||
contextFiles []string
|
||||
cacheCommand commands.DockerCommand
|
||||
}
|
||||
|
||||
func (m MockDockerCommand) ExecuteCommand(c *v1.Config, args *dockerfile.BuildArgs) error { return nil }
|
||||
func (m MockDockerCommand) String() string {
|
||||
return "meow"
|
||||
return m.command
|
||||
}
|
||||
func (m MockDockerCommand) FilesToSnapshot() []string {
|
||||
return []string{"meow-snapshot-no-cache"}
|
||||
|
|
|
|||
|
|
@ -18,11 +18,13 @@ package executor
|
|||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -51,6 +53,7 @@ type withUserAgent struct {
|
|||
|
||||
const (
|
||||
UpstreamClientUaKey = "UPSTREAM_CLIENT_TYPE"
|
||||
DockerConfLocation = "/kaniko/.docker/config.json"
|
||||
)
|
||||
|
||||
func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
|
|
@ -62,6 +65,48 @@ func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) {
|
|||
return w.t.RoundTrip(r)
|
||||
}
|
||||
|
||||
type CertPool interface {
|
||||
value() *x509.CertPool
|
||||
append(path string) error
|
||||
}
|
||||
|
||||
type X509CertPool struct {
|
||||
inner x509.CertPool
|
||||
}
|
||||
|
||||
func (p *X509CertPool) value() *x509.CertPool {
|
||||
return &p.inner
|
||||
}
|
||||
|
||||
func (p *X509CertPool) append(path string) error {
|
||||
pem, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.inner.AppendCertsFromPEM(pem)
|
||||
return nil
|
||||
}
|
||||
|
||||
type systemCertLoader func() CertPool
|
||||
|
||||
var defaultX509Handler systemCertLoader = func() CertPool {
|
||||
systemCertPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
logrus.Warn("Failed to load system cert pool. Loading empty one instead.")
|
||||
systemCertPool = x509.NewCertPool()
|
||||
}
|
||||
return &X509CertPool{
|
||||
inner: *systemCertPool,
|
||||
}
|
||||
}
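
The CertPool interface and defaultX509Handler above wrap x509.SystemCertPool so tests can substitute their own loader. The standalone sketch below restates, in plain standard-library calls, what X509CertPool.append does with a registry CA; the certificate path is hypothetical.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

func main() {
	// Mirror X509CertPool.append: start from the system pool (or an empty
	// one if it cannot be loaded), read a PEM file, and add it to the pool.
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool()
	}
	pem, err := ioutil.ReadFile("/kaniko/certs/my-registry-ca.crt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	pool.AppendCertsFromPEM(pem)

	// The resulting pool is what ends up in the transport's TLS config.
	_ = &tls.Config{RootCAs: pool}
}
```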
|
||||
|
||||
// for testing
|
||||
var (
|
||||
fs = afero.NewOsFs()
|
||||
execCommand = exec.Command
|
||||
checkRemotePushPermission = remote.CheckPushPermission
|
||||
)
|
||||
|
||||
// CheckPushPermissions checks that the configured credentials can be used to
|
||||
// push to every specified destination.
|
||||
func CheckPushPermissions(opts *config.KanikoOptions) error {
|
||||
|
|
@ -79,6 +124,18 @@ func CheckPushPermissions(opts *config.KanikoOptions) error {
|
|||
continue
|
||||
}
|
||||
|
||||
// Historically kaniko was pre-configured by default with gcr credential helper,
|
||||
// here we keep backwards compatibility by enabling the GCR helper only
|
||||
// when gcr.io is in one of the destinations.
|
||||
if strings.Contains(destRef.RegistryStr(), "gcr.io") {
|
||||
// Check for the existence of the docker config file, as it's normally required for
|
||||
// authenticated registries, and to avoid overwriting a user-provided docker config.
|
||||
if _, err := fs.Stat(DockerConfLocation); os.IsNotExist(err) {
|
||||
if err := execCommand("docker-credential-gcr", "configure-docker").Run(); err != nil {
|
||||
return errors.Wrap(err, "error while configuring docker-credential-gcr helper")
|
||||
}
|
||||
}
|
||||
}
|
||||
registryName := destRef.Repository.Registry.Name()
|
||||
if opts.Insecure || opts.InsecureRegistries.Contains(registryName) {
|
||||
newReg, err := name.NewRegistry(registryName, name.WeakValidation, name.Insecure)
|
||||
|
|
@ -87,8 +144,8 @@ func CheckPushPermissions(opts *config.KanikoOptions) error {
|
|||
}
|
||||
destRef.Repository.Registry = newReg
|
||||
}
|
||||
tr := makeTransport(opts, registryName)
|
||||
if err := remote.CheckPushPermission(destRef, creds.GetKeychain(), tr); err != nil {
|
||||
tr := makeTransport(opts, registryName, defaultX509Handler)
|
||||
if err := checkRemotePushPermission(destRef, creds.GetKeychain(), tr); err != nil {
|
||||
return errors.Wrapf(err, "checking push permission for %q", destRef)
|
||||
}
|
||||
checked[destRef.Context().RepositoryStr()] = true
|
||||
|
|
@ -184,7 +241,7 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
|
|||
return errors.Wrap(err, "resolving pushAuth")
|
||||
}
|
||||
|
||||
tr := makeTransport(opts, registryName)
|
||||
tr := makeTransport(opts, registryName, defaultX509Handler)
|
||||
rt := &withUserAgent{t: tr}
|
||||
|
||||
if err := remote.Write(destRef, image, remote.WithAuth(pushAuth), remote.WithTransport(rt)); err != nil {
|
||||
|
|
@ -195,8 +252,6 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
|
|||
return writeImageOutputs(image, destRefs)
|
||||
}
|
||||
|
||||
var fs = afero.NewOsFs()
|
||||
|
||||
func writeImageOutputs(image v1.Image, destRefs []name.Tag) error {
|
||||
dir := os.Getenv("BUILDER_OUTPUT")
|
||||
if dir == "" {
|
||||
|
|
@ -228,13 +283,22 @@ func writeImageOutputs(image v1.Image, destRefs []name.Tag) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func makeTransport(opts *config.KanikoOptions, registryName string) http.RoundTripper {
|
||||
func makeTransport(opts *config.KanikoOptions, registryName string, loader systemCertLoader) http.RoundTripper {
|
||||
// Create a transport to set our user-agent.
|
||||
tr := http.DefaultTransport
|
||||
var tr http.RoundTripper = http.DefaultTransport.(*http.Transport).Clone()
|
||||
if opts.SkipTLSVerify || opts.SkipTLSVerifyRegistries.Contains(registryName) {
|
||||
tr.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
} else if certificatePath := opts.RegistriesCertificates[registryName]; certificatePath != "" {
|
||||
systemCertPool := loader()
|
||||
if err := systemCertPool.append(certificatePath); err != nil {
|
||||
logrus.WithError(err).Warnf("Failed to load certificate %s for %s\n", certificatePath, registryName)
|
||||
} else {
|
||||
tr.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
RootCAs: systemCertPool.value(),
|
||||
}
|
||||
}
|
||||
}
|
||||
return tr
|
||||
}
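
makeTransport now returns a transport that either skips TLS verification or trusts an extra registry CA. The following hedged sketch shows what the push path does with such a transport, using the same go-containerregistry calls the diff already imports; the registry name and repository are made up, and the skip-verify transport here merely stands in for makeTransport's output.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Stand-in for makeTransport with --skip-tls-verify: clone the default
	// transport and disable certificate verification.
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}

	// A small random image and a hypothetical destination tag.
	img, err := random.Image(1024, 1)
	if err != nil {
		log.Fatal(err)
	}
	tag, err := name.NewTag("my.registry.name/some/repo:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}

	// Push through the custom transport, as DoPush does with remote.Write.
	if err := remote.Write(tag, img,
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
		remote.WithTransport(tr)); err != nil {
		log.Fatal(err)
	}
}
```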
|
||||
|
|
|
|||
|
|
@ -18,15 +18,19 @@ package executor
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/layout"
|
||||
"github.com/google/go-containerregistry/pkg/v1/random"
|
||||
|
|
@ -223,3 +227,150 @@ func TestImageNameDigestFile(t *testing.T) {
|
|||
testutil.CheckErrorAndDeepEqual(t, false, err, want, got)
|
||||
|
||||
}
|
||||
|
||||
type mockedCertPool struct {
|
||||
certificatesPath []string
|
||||
}
|
||||
|
||||
func (m *mockedCertPool) value() *x509.CertPool {
|
||||
return &x509.CertPool{}
|
||||
}
|
||||
|
||||
func (m *mockedCertPool) append(path string) error {
|
||||
m.certificatesPath = append(m.certificatesPath, path)
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_makeTransport(t *testing.T) {
|
||||
registryName := "my.registry.name"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
opts *config.KanikoOptions
|
||||
check func(*tls.Config, *mockedCertPool)
|
||||
}{
|
||||
{
|
||||
name: "SkipTLSVerify set",
|
||||
opts: &config.KanikoOptions{SkipTLSVerify: true},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if !config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerify set")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SkipTLSVerifyRegistries set with expected registry",
|
||||
opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{registryName}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if !config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerifyRegistries set with registry name")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SkipTLSVerifyRegistries set with other registry",
|
||||
opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{fmt.Sprintf("other.%s", registryName)}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify set while SkipTLSVerifyRegistries not set with registry name")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RegistriesCertificates set for registry",
|
||||
opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{registryName: "/path/to/the/certificate.cert"}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if len(pool.certificatesPath) != 1 || pool.certificatesPath[0] != "/path/to/the/certificate.cert" {
|
||||
t.Errorf("makeTransport().RegistriesCertificates certificate not appended to system certificates")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RegistriesCertificates set for another registry",
|
||||
opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{fmt.Sprintf("other.%s=", registryName): "/path/to/the/certificate.cert"}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if len(pool.certificatesPath) != 0 {
|
||||
t.Errorf("makeTransport().RegistriesCertificates certificate appended to system certificates while added for other registry")
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var certificatesPath []string
|
||||
certPool := mockedCertPool{
|
||||
certificatesPath: certificatesPath,
|
||||
}
|
||||
var mockedSystemCertLoader systemCertLoader = func() CertPool {
|
||||
return &certPool
|
||||
}
|
||||
transport := makeTransport(tt.opts, registryName, mockedSystemCertLoader)
|
||||
tt.check(transport.(*http.Transport).TLSClientConfig, &certPool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var calledExecCommand = false
|
||||
var calledCheckPushPermission = false
|
||||
|
||||
func setCalledFalse() {
|
||||
calledExecCommand = false
|
||||
calledCheckPushPermission = false
|
||||
}
|
||||
|
||||
func fakeExecCommand(command string, args ...string) *exec.Cmd {
|
||||
calledExecCommand = true
|
||||
cs := []string{"-test.run=TestHelperProcess", "--", command}
|
||||
cs = append(cs, args...)
|
||||
cmd := exec.Command(os.Args[0], cs...)
|
||||
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func fakeCheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error {
|
||||
calledCheckPushPermission = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestCheckPushPermissions(t *testing.T) {
|
||||
tests := []struct {
|
||||
Destination string
|
||||
ShouldCallExecCommand bool
|
||||
ExistingConfig bool
|
||||
}{
|
||||
{"gcr.io/test-image", true, false},
|
||||
{"gcr.io/test-image", false, true},
|
||||
{"localhost:5000/test-image", false, false},
|
||||
{"localhost:5000/test-image", false, true},
|
||||
}
|
||||
|
||||
execCommand = fakeExecCommand
|
||||
checkRemotePushPermission = fakeCheckPushPermission
|
||||
for _, test := range tests {
|
||||
testName := fmt.Sprintf("%s_ExistingDockerConf_%v", test.Destination, test.ExistingConfig)
|
||||
t.Run(testName, func(t *testing.T) {
|
||||
fs = afero.NewMemMapFs()
|
||||
opts := config.KanikoOptions{
|
||||
Destinations: []string{test.Destination},
|
||||
}
|
||||
if test.ExistingConfig {
|
||||
afero.WriteFile(fs, DockerConfLocation, []byte(""), os.FileMode(0644))
|
||||
defer fs.Remove(DockerConfLocation)
|
||||
}
|
||||
CheckPushPermissions(&opts)
|
||||
if test.ShouldCallExecCommand != calledExecCommand {
|
||||
t.Errorf("Expected calledExecCommand to be %v however it was %v",
|
||||
test.ShouldCallExecCommand, calledExecCommand)
|
||||
}
|
||||
setCalledFalse()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestHelperProcess(t *testing.T) {
|
||||
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(os.Stdout, "fake result")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,162 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ResolvePaths takes a slice of file paths and a slice of whitelist entries. It resolves each
|
||||
// file path according to a set of rules and then returns a slice of resolved paths or an error.
|
||||
// File paths are resolved according to the following rules:
|
||||
// * If path is whitelisted, skip it.
|
||||
// * If path is a symlink, resolve its ancestor link and add it to the output set.
|
||||
// * If path is a symlink, resolve its target. If the target is not whitelisted, add it to the
|
||||
// output set.
|
||||
// * Add all ancestors of each path to the output set.
|
||||
func ResolvePaths(paths []string, wl []util.WhitelistEntry) (pathsToAdd []string, err error) {
|
||||
logrus.Info("Resolving paths")
|
||||
logrus.Debugf("Resolving paths %s", paths)
|
||||
|
||||
fileSet := make(map[string]bool)
|
||||
|
||||
for _, f := range paths {
|
||||
// If the given path is part of the whitelist, ignore it
|
||||
if util.IsInProvidedWhitelist(f, wl) {
|
||||
logrus.Debugf("path %s is whitelisted, ignoring it", f)
|
||||
continue
|
||||
}
|
||||
|
||||
link, e := resolveSymlinkAncestor(f)
|
||||
if e != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if f != link {
|
||||
logrus.Tracef("updated link %s to %s", f, link)
|
||||
}
|
||||
|
||||
if !fileSet[link] {
|
||||
pathsToAdd = append(pathsToAdd, link)
|
||||
}
|
||||
fileSet[link] = true
|
||||
|
||||
var evaled string
|
||||
|
||||
// If the path is a symlink we need to also consider the target of that
|
||||
// link
|
||||
evaled, e = filepath.EvalSymlinks(f)
|
||||
if e != nil {
|
||||
if !os.IsNotExist(e) {
|
||||
logrus.Errorf("couldn't eval %s with link %s", f, link)
|
||||
return
|
||||
}
|
||||
|
||||
logrus.Debugf("symlink path %s, target does not exist", f)
|
||||
}
|
||||
|
||||
// If the given path is a symlink and the target is part of the whitelist
|
||||
// ignore the target
|
||||
if util.IsInProvidedWhitelist(evaled, wl) {
|
||||
logrus.Debugf("path %s is whitelisted, ignoring it", evaled)
|
||||
continue
|
||||
}
|
||||
|
||||
if !fileSet[evaled] {
|
||||
pathsToAdd = append(pathsToAdd, evaled)
|
||||
}
|
||||
fileSet[evaled] = true
|
||||
}
|
||||
|
||||
// Also add parent directories so that their permissions are preserved correctly.
|
||||
pathsToAdd = filesWithParentDirs(pathsToAdd)
|
||||
|
||||
return
|
||||
}
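
A minimal, hypothetical caller for ResolvePaths, following the rules listed above: the whitelisted path is dropped, symlink ancestors and targets are resolved, and every parent directory is appended. The input paths, the whitelist entry, and the pkg/filesystem import path are assumptions for illustration, not taken from the diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/filesystem"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	// One ordinary file and one path under a whitelisted prefix.
	paths := []string{"/workspace/app/main.go", "/var/run/secrets/token"}
	wl := []util.WhitelistEntry{{Path: "/var/run"}}

	resolved, err := filesystem.ResolvePaths(paths, wl)
	if err != nil {
		log.Fatal(err)
	}
	// Expect the non-whitelisted file plus its parent directories.
	for _, p := range resolved {
		fmt.Println(p)
	}
}
```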
|
||||
|
||||
// filesWithParentDirs returns every ancestor path for each provided file path.
|
||||
// e.g. /foo/bar/baz/boom.txt => [/, /foo, /foo/bar, /foo/bar/baz, /foo/bar/baz/boom.txt]
|
||||
func filesWithParentDirs(files []string) []string {
|
||||
filesSet := map[string]bool{}
|
||||
|
||||
for _, file := range files {
|
||||
file = filepath.Clean(file)
|
||||
filesSet[file] = true
|
||||
|
||||
for _, dir := range util.ParentDirectories(file) {
|
||||
dir = filepath.Clean(dir)
|
||||
filesSet[dir] = true
|
||||
}
|
||||
}
|
||||
|
||||
newFiles := []string{}
|
||||
for file := range filesSet {
|
||||
newFiles = append(newFiles, file)
|
||||
}
|
||||
|
||||
return newFiles
|
||||
}
|
||||
|
||||
// resolveSymlinkAncestor returns the ancestor link of the provided symlink path, or returns
|
||||
// the path if it is not a link. The ancestor link is the filenode whose type is a Symlink.
|
||||
// E.g. /baz/boom/bar.txt links to /usr/bin/bar.txt, but /baz/boom/bar.txt itself is not a link.
|
||||
// Instead /baz/boom is actually a link to /usr/bin. In this case resolveSymlinkAncestor would
|
||||
// return /baz/boom.
|
||||
func resolveSymlinkAncestor(path string) (string, error) {
|
||||
if !filepath.IsAbs(path) {
|
||||
return "", errors.New("dest path must be abs")
|
||||
}
|
||||
|
||||
last := ""
|
||||
newPath := filepath.Clean(path)
|
||||
|
||||
loop:
|
||||
for newPath != "/" {
|
||||
fi, err := os.Lstat(newPath)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "resolvePaths: failed to lstat")
|
||||
}
|
||||
|
||||
if util.IsSymlink(fi) {
|
||||
last = filepath.Base(newPath)
|
||||
newPath = filepath.Dir(newPath)
|
||||
} else {
|
||||
// Even if the filenode pointed to by newPath is a regular file,
|
||||
// one of its ancestors could be a symlink. We call filepath.EvalSymlinks
|
||||
// to test whether there are any links in the path. If the output of
|
||||
// EvalSymlinks is different from the input, we know one of the nodes in
|
||||
// the path is a link.
|
||||
target, err := filepath.EvalSymlinks(newPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if target != newPath {
|
||||
last = filepath.Base(newPath)
|
||||
newPath = filepath.Dir(newPath)
|
||||
} else {
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
||||
newPath = filepath.Join(newPath, last)
|
||||
return filepath.Clean(newPath), nil
|
||||
}
|
||||
|
|
@ -0,0 +1,400 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filesystem
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
)
|
||||
|
||||
func Test_ResolvePaths(t *testing.T) {
|
||||
validateResults := func(
|
||||
t *testing.T,
|
||||
actualFiles,
|
||||
expectedFiles []string,
|
||||
err error,
|
||||
) {
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
// Sort so that comparison is against consistent order
|
||||
sort.Strings(actualFiles)
|
||||
sort.Strings(expectedFiles)
|
||||
|
||||
if !reflect.DeepEqual(actualFiles, expectedFiles) {
|
||||
t.Errorf("expected files to equal %s but was %s",
|
||||
expectedFiles, actualFiles,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("list of files", func(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("", "snapshot-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
files := []string{
|
||||
"/foo/bar.txt",
|
||||
"/baz/boom.txt",
|
||||
}
|
||||
|
||||
t.Run("all are symlinks", func(t *testing.T) {
|
||||
for _, f := range files {
|
||||
fLink := filepath.Join(dir, "link", f)
|
||||
fTarget := filepath.Join(dir, "target", f)
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(fTarget), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(fTarget, []byte{}, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(fLink), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(fTarget, fLink); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("none are whitelisted", func(t *testing.T) {
|
||||
wl := []util.WhitelistEntry{}
|
||||
|
||||
inputFiles := []string{}
|
||||
expectedFiles := []string{}
|
||||
|
||||
for _, f := range files {
|
||||
link := filepath.Join(dir, "link", f)
|
||||
expectedFiles = append(expectedFiles, link)
|
||||
inputFiles = append(inputFiles, link)
|
||||
|
||||
target := filepath.Join(dir, "target", f)
|
||||
expectedFiles = append(expectedFiles, target)
|
||||
}
|
||||
|
||||
expectedFiles = filesWithParentDirs(expectedFiles)
|
||||
|
||||
files, err := ResolvePaths(inputFiles, wl)
|
||||
|
||||
validateResults(t, files, expectedFiles, err)
|
||||
})
|
||||
|
||||
t.Run("some are whitelisted", func(t *testing.T) {
|
||||
wl := []util.WhitelistEntry{
|
||||
{
|
||||
Path: filepath.Join(dir, "link", "baz"),
|
||||
},
|
||||
{
|
||||
Path: filepath.Join(dir, "target", "foo"),
|
||||
},
|
||||
}
|
||||
|
||||
expectedFiles := []string{}
|
||||
inputFiles := []string{}
|
||||
|
||||
for _, f := range files {
|
||||
link := filepath.Join(dir, "link", f)
|
||||
inputFiles = append(inputFiles, link)
|
||||
|
||||
if util.IsInProvidedWhitelist(link, wl) {
|
||||
t.Logf("skipping %s", link)
|
||||
continue
|
||||
}
|
||||
|
||||
expectedFiles = append(expectedFiles, link)
|
||||
|
||||
target := filepath.Join(dir, "target", f)
|
||||
|
||||
if util.IsInProvidedWhitelist(target, wl) {
|
||||
t.Logf("skipping %s", target)
|
||||
continue
|
||||
}
|
||||
|
||||
expectedFiles = append(expectedFiles, target)
|
||||
}
|
||||
|
||||
link := filepath.Join(dir, "link", "zoom/")
|
||||
|
||||
target := filepath.Join(dir, "target", "zaam/")
|
||||
if err := os.MkdirAll(target, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(target, "meow.txt"), []byte{}, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Symlink(target, link); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
file := filepath.Join(link, "meow.txt")
|
||||
inputFiles = append(inputFiles, file)
|
||||
|
||||
expectedFiles = append(expectedFiles, link)
|
||||
|
||||
targetFile := filepath.Join(target, "meow.txt")
|
||||
expectedFiles = append(expectedFiles, targetFile)
|
||||
|
||||
expectedFiles = filesWithParentDirs(expectedFiles)
|
||||
|
||||
files, err := ResolvePaths(inputFiles, wl)
|
||||
|
||||
validateResults(t, files, expectedFiles, err)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("empty set of files", func(t *testing.T) {
|
||||
inputFiles := []string{}
|
||||
expectedFiles := []string{}
|
||||
|
||||
wl := []util.WhitelistEntry{}
|
||||
|
||||
files, err := ResolvePaths(inputFiles, wl)
|
||||
|
||||
validateResults(t, files, expectedFiles, err)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_resolveSymlinkAncestor(t *testing.T) {
|
||||
setupDirs := func(t *testing.T) (string, string) {
|
||||
testDir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
targetDir := filepath.Join(testDir, "bar", "baz")
|
||||
|
||||
if err := os.MkdirAll(targetDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(targetDir, "bam.txt")
|
||||
|
||||
if err := ioutil.WriteFile(targetPath, []byte("meow"), 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return testDir, targetPath
|
||||
}
|
||||
|
||||
t.Run("path is a symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
linkDir := filepath.Join(testDir, "foo", "buzz")
|
||||
|
||||
if err := os.MkdirAll(linkDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkPath := filepath.Join(linkDir, "zoom.txt")
|
||||
|
||||
if err := os.Symlink(targetPath, linkPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := linkPath
|
||||
|
||||
actual, err := resolveSymlinkAncestor(linkPath)
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("dir ends with / is not a symlink", func(t *testing.T) {
|
||||
testDir, _ := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
linkDir := filepath.Join(testDir, "var", "www")
|
||||
if err := os.MkdirAll(linkDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := linkDir
|
||||
|
||||
actual, err := resolveSymlinkAncestor(fmt.Sprintf("%s/", linkDir))
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("path is a dead symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
linkDir := filepath.Join(testDir, "foo", "buzz")
|
||||
|
||||
if err := os.MkdirAll(linkDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkPath := filepath.Join(linkDir, "zoom.txt")
|
||||
|
||||
if err := os.Symlink(targetPath, linkPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.Remove(targetPath); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := linkPath
|
||||
|
||||
actual, err := resolveSymlinkAncestor(linkPath)
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("path is not a symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
expected := targetPath
|
||||
|
||||
actual, err := resolveSymlinkAncestor(targetPath)
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("parent of path is a symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
targetDir := filepath.Dir(targetPath)
|
||||
|
||||
linkDir := filepath.Join(testDir, "foo")
|
||||
|
||||
if err := os.MkdirAll(linkDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkDir = filepath.Join(linkDir, "gaz")
|
||||
|
||||
if err := os.Symlink(targetDir, linkDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkPath := filepath.Join(linkDir, filepath.Base(targetPath))
|
||||
|
||||
expected := linkDir
|
||||
|
||||
actual, err := resolveSymlinkAncestor(linkPath)
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("parent of path is a dead symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
targetDir := filepath.Dir(targetPath)
|
||||
|
||||
linkDir := filepath.Join(testDir, "foo")
|
||||
|
||||
if err := os.MkdirAll(linkDir, 0777); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkDir = filepath.Join(linkDir, "gaz")
|
||||
|
||||
if err := os.Symlink(targetDir, linkDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(targetDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkPath := filepath.Join(linkDir, filepath.Base(targetPath))
|
||||
|
||||
_, err := resolveSymlinkAncestor(linkPath)
|
||||
if err == nil {
|
||||
t.Error("expected err to not be nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("great grandparent of path is a symlink", func(t *testing.T) {
|
||||
testDir, targetPath := setupDirs(t)
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
targetDir := filepath.Dir(targetPath)
|
||||
|
||||
linkDir := filepath.Join(testDir, "foo")
|
||||
|
||||
if err := os.Symlink(filepath.Dir(targetDir), linkDir); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
linkPath := filepath.Join(
|
||||
linkDir,
|
||||
filepath.Join(
|
||||
filepath.Base(targetDir),
|
||||
filepath.Base(targetPath),
|
||||
),
|
||||
)
|
||||
|
||||
expected := linkDir
|
||||
|
||||
actual, err := resolveSymlinkAncestor(linkPath)
|
||||
if err != nil {
|
||||
t.Errorf("expected err to be nil but was %s", err)
|
||||
}
|
||||
|
||||
if actual != expected {
|
||||
t.Errorf("expected result to be %s not %s", expected, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// Default log level
|
||||
DefaultLevel = "info"
|
||||
|
||||
// Text format
|
||||
FormatText = "text"
|
||||
// Colored text format
|
||||
FormatColor = "color"
|
||||
// JSON format
|
||||
FormatJSON = "json"
|
||||
)
|
||||
|
||||
// Configure sets the logrus logging level and formatter
|
||||
func Configure(level, format string) error {
|
||||
lvl, err := logrus.ParseLevel(level)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "parsing log level")
|
||||
}
|
||||
logrus.SetLevel(lvl)
|
||||
|
||||
var formatter logrus.Formatter
|
||||
switch format {
|
||||
case FormatText:
|
||||
formatter = &logrus.TextFormatter{
|
||||
DisableColors: true,
|
||||
}
|
||||
case FormatColor:
|
||||
formatter = &logrus.TextFormatter{
|
||||
ForceColors: true,
|
||||
}
|
||||
case FormatJSON:
|
||||
formatter = &logrus.JSONFormatter{}
|
||||
default:
|
||||
return fmt.Errorf("not a valid log format: %q. Please specify one of (text, color, json)", format)
|
||||
}
|
||||
logrus.SetFormatter(formatter)
|
||||
|
||||
return nil
|
||||
}
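
A short sketch of Configure in use, assuming the file lives in a pkg/logging package (the diff does not show its path): pick a level and one of the three format constants at startup, and treat an unknown format as a hard error rather than a silent default.

```go
package main

import (
	"github.com/GoogleContainerTools/kaniko/pkg/logging"
	"github.com/sirupsen/logrus"
)

func main() {
	// "debug" is parsed by logrus.ParseLevel; FormatJSON selects the
	// structured JSON formatter defined above.
	if err := logging.Configure("debug", logging.FormatJSON); err != nil {
		logrus.Fatal(err)
	}
	logrus.Debug("structured debug logging is now enabled")
}
```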
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
/*
|
||||
Copyright 2020 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: github.com/google/go-containerregistry/pkg/v1 (interfaces: Layer)
|
||||
|
||||
// Package mockv1 is a generated GoMock package.
|
||||
package mockv1
|
||||
|
||||
import (
|
||||
io "io"
|
||||
reflect "reflect"
|
||||
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
types "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// MockLayer is a mock of Layer interface
|
||||
type MockLayer struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockLayerMockRecorder
|
||||
}
|
||||
|
||||
// MockLayerMockRecorder is the mock recorder for MockLayer
|
||||
type MockLayerMockRecorder struct {
|
||||
mock *MockLayer
|
||||
}
|
||||
|
||||
// NewMockLayer creates a new mock instance
|
||||
func NewMockLayer(ctrl *gomock.Controller) *MockLayer {
|
||||
mock := &MockLayer{ctrl: ctrl}
|
||||
mock.recorder = &MockLayerMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use
|
||||
func (m *MockLayer) EXPECT() *MockLayerMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Compressed mocks base method
|
||||
func (m *MockLayer) Compressed() (io.ReadCloser, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Compressed")
|
||||
ret0, _ := ret[0].(io.ReadCloser)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Compressed indicates an expected call of Compressed
|
||||
func (mr *MockLayerMockRecorder) Compressed() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Compressed", reflect.TypeOf((*MockLayer)(nil).Compressed))
|
||||
}
|
||||
|
||||
// DiffID mocks base method
|
||||
func (m *MockLayer) DiffID() (v1.Hash, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "DiffID")
|
||||
ret0, _ := ret[0].(v1.Hash)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// DiffID indicates an expected call of DiffID
|
||||
func (mr *MockLayerMockRecorder) DiffID() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiffID", reflect.TypeOf((*MockLayer)(nil).DiffID))
|
||||
}
|
||||
|
||||
// Digest mocks base method
|
||||
func (m *MockLayer) Digest() (v1.Hash, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Digest")
|
||||
ret0, _ := ret[0].(v1.Hash)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Digest indicates an expected call of Digest
|
||||
func (mr *MockLayerMockRecorder) Digest() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Digest", reflect.TypeOf((*MockLayer)(nil).Digest))
|
||||
}
|
||||
|
||||
// MediaType mocks base method
|
||||
func (m *MockLayer) MediaType() (types.MediaType, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "MediaType")
|
||||
ret0, _ := ret[0].(types.MediaType)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// MediaType indicates an expected call of MediaType
|
||||
func (mr *MockLayerMockRecorder) MediaType() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MediaType", reflect.TypeOf((*MockLayer)(nil).MediaType))
|
||||
}
|
||||
|
||||
// Size mocks base method
|
||||
func (m *MockLayer) Size() (int64, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Size")
|
||||
ret0, _ := ret[0].(int64)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Size indicates an expected call of Size
|
||||
func (mr *MockLayerMockRecorder) Size() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockLayer)(nil).Size))
|
||||
}
|
||||
|
||||
// Uncompressed mocks base method
|
||||
func (m *MockLayer) Uncompressed() (io.ReadCloser, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Uncompressed")
|
||||
ret0, _ := ret[0].(io.ReadCloser)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// Uncompressed indicates an expected call of Uncompressed
|
||||
func (mr *MockLayerMockRecorder) Uncompressed() *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Uncompressed", reflect.TypeOf((*MockLayer)(nil).Uncompressed))
|
||||
}
|
||||
|
|
@ -20,11 +20,13 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type LayeredMap struct {
|
||||
|
|
@ -113,13 +115,18 @@ func (l *LayeredMap) Add(s string) error {
|
|||
// from the current layered map by its hashing function.
|
||||
// Returns true if the file is changed.
|
||||
func (l *LayeredMap) CheckFileChange(s string) (bool, error) {
|
||||
oldV, ok := l.Get(s)
|
||||
t := timing.Start("Hashing files")
|
||||
defer timing.DefaultRun.Stop(t)
|
||||
newV, err := l.hasher(s)
|
||||
if err != nil {
|
||||
// If the file does not exist in the new layer, report it as unchanged.
|
||||
if os.IsNotExist(err) {
|
||||
logrus.Tracef("%s detected as changed but does not exist", s)
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
oldV, ok := l.Get(s)
|
||||
if ok && newV == oldV {
|
||||
return false, nil
|
||||
}
|
||||
|
|
|
|||