Merge remote-tracking branch 'origin/master' into registry-mirror

Commit 3678b70239 by Yoan Blanc, 2019-12-22 11:10:15 +01:00
GPG Key ID: 6058CF4574298812 (no known key found for this signature in database)
989 changed files with 207310 additions and 230489 deletions

View File

@@ -150,6 +150,9 @@ linters:
disable:
- errcheck
- gas
- scopelint
- bodyclose
- staticcheck
disable-all: false
presets:
- bugs

View File

@@ -2,7 +2,7 @@ language: go
os: linux
go:
- "1.10"
- "1.13.3"
go_import_path: github.com/GoogleContainerTools/kaniko
script:

View File

@@ -1,3 +1,46 @@
# v0.15.0 Release - 2019-12-20
## Bug fixes
* Fix #899 cached copy results in inconsistent key [#914](https://github.com/GoogleContainerTools/kaniko/pull/914)
* Fix contribution issue sentence [#912](https://github.com/GoogleContainerTools/kaniko/pull/912)
* Include source stage cache key in cache key for COPY commands using --from [#883](https://github.com/GoogleContainerTools/kaniko/pull/883)
* Fix failure when using capital letters in image alias in 'FROM ... AS…' instruction [#839](https://github.com/GoogleContainerTools/kaniko/pull/839)
* Add golangci.yaml file matching current config [#893](https://github.com/GoogleContainerTools/kaniko/pull/893)
* When copying, skip files with the same name [#905](https://github.com/GoogleContainerTools/kaniko/pull/905)
* Modified error message for writing image with digest file [#849](https://github.com/GoogleContainerTools/kaniko/pull/849)
* Don't exit optimize early; record last cachekey [#892](https://github.com/GoogleContainerTools/kaniko/pull/892)
* Final cachekey for stage [#891](https://github.com/GoogleContainerTools/kaniko/pull/891)
* Update error handling and logging for cache [#879](https://github.com/GoogleContainerTools/kaniko/pull/879)
* Resolve symlink targets to abs path before copying [#857](https://github.com/GoogleContainerTools/kaniko/pull/857)
* Fix quote strip behavior for ARG values [#850](https://github.com/GoogleContainerTools/kaniko/pull/850)
## Updates and Refactors
* add unit tests for caching run and copy [#888](https://github.com/GoogleContainerTools/kaniko/pull/888)
* Only build required docker images for integration tests [#898](https://github.com/GoogleContainerTools/kaniko/pull/898)
* Add integration test for add url with arg [#863](https://github.com/GoogleContainerTools/kaniko/pull/863)
* Add unit tests for compositecache and stagebuilder [#890](https://github.com/GoogleContainerTools/kaniko/pull/890)
## Documentation
* updated readme [#906](https://github.com/GoogleContainerTools/kaniko/pull/906)
* nits in README [#861](https://github.com/GoogleContainerTools/kaniko/pull/861)
* Invalid link to missing file config.json [#876](https://github.com/GoogleContainerTools/kaniko/pull/876)
* Fix README.md anchor links [#872](https://github.com/GoogleContainerTools/kaniko/pull/872)
* Update readme known issues [#874](https://github.com/GoogleContainerTools/kaniko/pull/874)
A huge thank you to our contributors for this release:
- Balint Pato
- Ben Einaudi
- Cole Wippern
- Eduard Laur
- Josh Soref
- Pweetoo
- Tejal Desai
- Will Ripley
- poy
- priyawadhwa
- tommaso.doninelli
# v0.14.0 Release - 2019-11-08
## New Features

View File

@@ -41,7 +41,7 @@ specifically:
- Start with a subject line
- Contain a body that explains _why_ you're making the change you're making
- Reference an issue number one exists, closing it if applicable (with text such as
- Reference an issue number if one exists, closing it if applicable (with text such as
["Fixes #245" or "Closes #111"](https://help.github.com/articles/closing-issues-using-keywords/))
Aim for [2 paragraphs in the body](https://www.youtube.com/watch?v=PJjmw9TRB7s).
@@ -67,4 +67,4 @@ interesting to work on:
- To find issues that we particularly would like contributors to tackle, look for
[issues with the "help wanted" label](https://github.com/GoogleContainerTools/kaniko/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22).
- Issues that are good for new folks will additionally be marked with
["good first issue"](https://github.com/GoogleContainerTools/kaniko/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).
["good first issue"](https://github.com/GoogleContainerTools/kaniko/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22).

Gopkg.lock (generated): 1446 changed lines

File diff suppressed because it is too large

View File

@@ -1,56 +0,0 @@
required = [
"k8s.io/client-go/discovery",
]
[prune]
unused-packages = true
non-go = true
go-tests = true
[[override]]
name = "github.com/prometheus/client_golang"
revision = "a40133b69fbd73ee655606a9bf5f8b9b9bf758dd"
[[override]]
name = "github.com/opencontainers/runc"
revision = "4fc53a81fb7c994640722ac585fa9ca548971871"
[[override]]
name = "github.com/ishidawataru/sctp"
revision = "07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb"
[[override]]
name = "github.com/vishvananda/netlink"
revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e"
[[override]]
name = "google.golang.org/grpc"
revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.13.56"
[[constraint]]
name = "k8s.io/client-go"
version = "kubernetes-1.11.3"
[[constraint]]
name = "github.com/google/go-containerregistry"
revision = "31e00cede111067bae48bfc2cbfc522b0b36207f"
[[override]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.11.0"
[[constraint]]
name = "gopkg.in/src-d/go-git.v4"
version = "4.6.0"
[[constraint]]
name = "github.com/minio/HighwayHash"
version = "1.0.0"
[[constraint]]
name = "github.com/Azure/azure-storage-blob-go"
version = "0.8.0"

View File

@@ -14,7 +14,7 @@
# Bump these on release
VERSION_MAJOR ?= 0
VERSION_MINOR ?= 14
VERSION_MINOR ?= 15
VERSION_BUILD ?= 0
VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)
@@ -40,6 +40,12 @@ WARMER_PACKAGE = $(REPOPATH)/cmd/warmer
KANIKO_PROJECT = $(REPOPATH)/kaniko
BUILD_ARG ?=
# Force using Go Modules and always read the dependencies from
# the `vendor` folder.
export GO111MODULE = on
export GOFLAGS = -mod=vendor
out/executor: $(GO_FILES)
GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -o $@ $(EXECUTOR_PACKAGE)
@@ -60,3 +66,8 @@ images:
docker build ${BUILD_ARG} -t $(REGISTRY)/executor:debug -f deploy/Dockerfile_debug .
docker build ${BUILD_ARG} -t $(REGISTRY)/warmer:latest -f deploy/Dockerfile_warmer .
.PHONY: push
push:
docker push $(REGISTRY)/executor:latest
docker push $(REGISTRY)/executor:debug
docker push $(REGISTRY)/warmer:latest

README.md: 115 changed lines
View File

@@ -25,53 +25,54 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [kaniko - Build Images In Kubernetes](#kaniko---build-images-in-kubernetes)
- [How does kaniko work?](#how-does-kaniko-work)
- [Known Issues](#known-issues)
- [Demo](#demo)
- [Tutorial](#tutorial)
- [Using kaniko](#using-kaniko)
- [kaniko Build Contexts](#kaniko-build-contexts)
- [Using Private Git Repository](#using-private-git-repository)
- [Running kaniko](#running-kaniko)
- [Running kaniko in a Kubernetes cluster](#running-kaniko-in-a-kubernetes-cluster)
- [Kubernetes secret](#kubernetes-secret)
- [Running kaniko in gVisor](#running-kaniko-in-gvisor)
- [Running kaniko in Google Cloud Build](#running-kaniko-in-google-cloud-build)
- [Running kaniko in Docker](#running-kaniko-in-docker)
- [Caching](#caching)
- [Caching Layers](#caching-layers)
- [Caching Base Images](#caching-base-images)
- [Pushing to Different Registries](#pushing-to-different-registries)
- [Pushing to Docker Hub](#pushing-to-docker-hub)
- [Pushing to Amazon ECR](#pushing-to-amazon-ecr)
- [Additional Flags](#additional-flags)
- [--build-arg](#--build-arg)
- [--cache](#--cache)
- [--cache-dir](#--cache-dir)
- [--cache-repo](#--cache-repo)
- [--digest-file](#--digest-file)
- [--oci-layout-path](#--oci-layout-path)
- [--insecure-registry](#--insecure-registry)
- [--skip-tls-verify-registry](#--skip-tls-verify-registry)
- [--cleanup](#--cleanup)
- [--insecure](#--insecure)
- [--insecure-pull](#--insecure-pull)
- [--no-push](#--no-push)
- [--registry-mirror](#--registry-mirror)
- [--reproducible](#--reproducible)
- [--single-snapshot](#--single-snapshot)
- [--skip-tls-verify](#--skip-tls-verify)
- [--skip-tls-verify-pull](#--skip-tls-verify-pull)
- [--snapshotMode](#--snapshotmode)
- [--target](#--target)
- [--tarPath](#--tarpath)
- [--verbosity](#--verbosity)
- [Community](#community)
- [How does kaniko work?](#how-does-kaniko-work)
- [Known Issues](#known-issues)
- [Demo](#demo)
- [Tutorial](#tutorial)
- [Using kaniko](#using-kaniko)
- [kaniko Build Contexts](#kaniko-build-contexts)
- [Using Azure Blob Storage](#using-azure-blob-storage)
- [Using Private Git Repository](#using-private-git-repository)
- [Running kaniko](#running-kaniko)
- [Running kaniko in a Kubernetes cluster](#running-kaniko-in-a-kubernetes-cluster)
- [Kubernetes secret](#kubernetes-secret)
- [Running kaniko in gVisor](#running-kaniko-in-gvisor)
- [Running kaniko in Google Cloud Build](#running-kaniko-in-google-cloud-build)
- [Running kaniko in Docker](#running-kaniko-in-docker)
- [Caching](#caching)
- [Caching Layers](#caching-layers)
- [Caching Base Images](#caching-base-images)
- [Pushing to Different Registries](#pushing-to-different-registries)
- [Pushing to Docker Hub](#pushing-to-docker-hub)
- [Pushing to Amazon ECR](#pushing-to-amazon-ecr)
- [Additional Flags](#additional-flags)
- [--build-arg](#--build-arg)
- [--cache](#--cache)
- [--cache-dir](#--cache-dir)
- [--cache-repo](#--cache-repo)
- [--digest-file](#--digest-file)
- [--oci-layout-path](#--oci-layout-path)
- [--insecure-registry](#--insecure-registry)
- [--skip-tls-verify-registry](#--skip-tls-verify-registry)
- [--cleanup](#--cleanup)
- [--insecure](#--insecure)
- [--insecure-pull](#--insecure-pull)
- [--no-push](#--no-push)
- [--registry-mirror](#--registry-mirror)
- [--reproducible](#--reproducible)
- [--single-snapshot](#--single-snapshot)
- [--skip-tls-verify](#--skip-tls-verify)
- [--skip-tls-verify-pull](#--skip-tls-verify-pull)
- [--snapshotMode](#--snapshotmode)
- [--target](#--target)
- [--tarPath](#--tarpath)
- [--verbosity](#--verbosity)
- [Debug Image](#debug-image)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)
- [Community](#community)
- [Limitations](#limitations)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)
- [Community](#community-1)
- [Limitations](#limitations)
- [mtime and snapshotting](#mtime-and-snapshotting)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -153,7 +154,7 @@ If you don't specify a prefix, kaniko will assume a local directory.
For example, to use a GCS bucket called `kaniko-bucket`, you would pass in `--context=gs://kaniko-bucket/path/to/context.tar.gz`.
### Using Azure Blob Storage
If you are using Azure Blob Storage for context file, you will need to pass [Azure Storage Account Access Key](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2fazure%2fstorage%2fblobs%2ftoc.json) as an evironment variable named `AZURE_STORAGE_ACCESS_KEY` through Kubernetes Secrets
If you are using Azure Blob Storage for context file, you will need to pass [Azure Storage Account Access Key](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string?toc=%2fazure%2fstorage%2fblobs%2ftoc.json) as an environment variable named `AZURE_STORAGE_ACCESS_KEY` through Kubernetes Secrets
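As a rough illustration (not kaniko's actual source), a Go client of the vendored `github.com/Azure/azure-storage-blob-go` package would consume that variable along these lines; the account name below is a hypothetical placeholder:

```go
package main

import (
	"log"
	"os"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// "mystorageaccount" is a hypothetical placeholder account name.
	accountName := "mystorageaccount"
	// The access key is expected in the environment, e.g. mounted from a
	// Kubernetes Secret as described above.
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCESS_KEY")
	if !ok {
		log.Fatal("AZURE_STORAGE_ACCESS_KEY is not set")
	}
	credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatalf("building Azure credential: %v", err)
	}
	// The credential would then back an azblob pipeline that downloads the
	// build-context tarball.
	_ = credential
}
```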
### Using Private Git Repository
You can use `Personal Access Tokens` for Build Contexts from Private Repositories from [GitHub](https://blog.github.com/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/).
@@ -293,7 +294,7 @@ Before executing a command, kaniko checks the cache for the layer.
If it exists, kaniko will pull and extract the cached layer instead of executing the command.
If not, kaniko will execute the command and then push the newly created layer to the cache.
Users can opt in to caching by setting the `--cache=true` flag.
Users can opt into caching by setting the `--cache=true` flag.
A remote repository for storing cached layers can be provided via the `--cache-repo` flag.
If this flag isn't provided, a cached repo will be inferred from the `--destination` provided.
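That check-then-push flow can be sketched as follows; the `Command` and `LayerCache` interfaces here are hypothetical stand-ins for kaniko's internal types, not its actual API:

```go
package cache

// Layer is a stand-in for a container image layer.
type Layer []byte

// Command is a hypothetical Dockerfile command with a deterministic cache key.
type Command interface {
	CacheKey() string
	Execute() (Layer, error)
}

// LayerCache is a hypothetical remote layer store (e.g. the --cache-repo).
type LayerCache interface {
	Get(key string) (Layer, bool)
	Push(key string, layer Layer) error
}

// applyCommand reuses a cached layer when one exists; otherwise it runs the
// command and pushes the freshly built layer for later builds.
func applyCommand(cmd Command, cache LayerCache, extract func(Layer) error) error {
	key := cmd.CacheKey()
	if layer, ok := cache.Get(key); ok {
		return extract(layer) // cache hit: pull and extract instead of executing
	}
	layer, err := cmd.Execute() // cache miss: execute the command
	if err != nil {
		return err
	}
	return cache.Push(key, layer) // populate the cache for subsequent builds
}
```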
@@ -343,7 +344,7 @@ Run kaniko with the `config.json` inside `/kaniko/.docker/config.json`
#### Pushing to Amazon ECR
The Amazon ECR [credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) is built in to the kaniko executor image.
The Amazon ECR [credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) is built into the kaniko executor image.
To configure credentials, you will need to do the following:
1. Update the `credHelpers` section of [config.json](https://github.com/awslabs/amazon-ecr-credential-helper#configuration) with the specific URI of your ECR registry:
@@ -408,7 +409,7 @@ You can set it multiple times for multiple arguments.
#### --cache
Set this flag as `--cache=true` to opt in to caching with kaniko.
Set this flag as `--cache=true` to opt into caching with kaniko.
#### --cache-dir
@@ -418,7 +419,7 @@ _This flag must be used in conjunction with the `--cache=true` flag._
#### --cache-repo
Set this flag to specify a remote repository which will be used to store cached layers.
Set this flag to specify a remote repository that will be used to store cached layers.
If this flag is not provided, a cache repo will be inferred from the `--destination` flag.
If `--destination=gcr.io/kaniko-project/test`, then cached layers will be stored in `gcr.io/kaniko-project/test/cache`.
@@ -457,7 +458,7 @@ You can set it multiple times for multiple registries.
#### --skip-tls-verify-registry
Set this flag to skip TLS cerificate validation when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production!
Set this flag to skip TLS certificate validation when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production!
You can set it multiple times for multiple registries.
#### --cleanup
@@ -517,7 +518,7 @@ Set this flag as `--verbosity=<panic|fatal|error|warn|info|debug>` to set the lo
### Debug Image
The kaniko executor image is based off of scratch and doesn't contain a shell.
The kaniko executor image is based on scratch and doesn't contain a shell.
We provide `gcr.io/kaniko-project/executor:debug`, a debug image which consists of the kaniko executor image along with a busybox shell to enter.
You can launch the debug image with a shell entrypoint:
@@ -534,7 +535,7 @@ kaniko relies on the security features of your container runtime to provide buil
The minimum permissions kaniko needs inside your container are governed by a few things:
* The permissions required to unpack your base image into it's container
* The permissions required to unpack your base image into its container
* The permissions required to execute the RUN commands inside the container
If you have a minimal base image (SCRATCH or similar) that doesn't require
@@ -559,7 +560,7 @@ Similar tools include:
All of these tools build container images with different approaches.
BuildKit (and `img`) can perform as a non root user from within a container, but requires
BuildKit (and `img`) can perform as a non-root user from within a container but requires
seccomp and AppArmor to be disabled to create nested containers. `kaniko`
does not actually create nested containers, so it does not require seccomp and AppArmor
to be disabled.
@@ -572,7 +573,7 @@ builds can be done entirely without privilege).
`umoci` works without any privileges, and also has no restrictions on the root
filesystem being extracted (though it requires additional handling if your
filesystem is sufficiently complicated). However it has no `Dockerfile`-like
filesystem is sufficiently complicated). However, it has no `Dockerfile`-like
build tooling (it's a slightly lower-level tool that can be used to build such
builders -- such as `orca-build`).
@@ -604,7 +605,7 @@ To Contribute to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.
When taking a snapshot, kaniko's hashing algorithms include (or in the case of
[`--snapshotMode=time`](#--snapshotmode), only use) a file's
[`mtime`](https://en.wikipedia.org/wiki/Inode#POSIX_inode_description) to determine
if the file has changed. Unfortunately there is a delay between when changes to a
if the file has changed. Unfortunately, there is a delay between when changes to a
file are made and when the `mtime` is updated. This means:
* With the time-only snapshot mode (`--snapshotMode=time`), kaniko may miss changes
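To make the failure mode concrete, here is an illustrative comparison of the two keying strategies (a hypothetical helper, not kaniko's actual hasher); in time mode, an edit that lands before the `mtime` ticks over yields an unchanged key, so the change goes undetected:

```go
package snapshot

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// fileKey returns a per-file snapshot key. With timeOnly=true it mimics
// --snapshotMode=time (only the mtime feeds the key); otherwise the file
// contents are hashed as well.
func fileKey(path string, timeOnly bool) (string, error) {
	info, err := os.Stat(path)
	if err != nil {
		return "", err
	}
	h := sha256.New()
	// The path and mtime always contribute to the key.
	fmt.Fprintf(h, "%s|%d", path, info.ModTime().UnixNano())
	if !timeOnly {
		// Full mode also hashes the contents, so a change is caught even
		// when the mtime has not yet been updated.
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}
```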

View File

@@ -46,7 +46,7 @@ var (
func init() {
RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic)")
RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")
addKanikoOptionsFlags(RootCmd)
addKanikoOptionsFlags()
addHiddenFlags(RootCmd)
}
@@ -118,7 +118,7 @@ var RootCmd = &cobra.Command{
}
// addKanikoOptionsFlags configures opts
func addKanikoOptionsFlags(cmd *cobra.Command) {
func addKanikoOptionsFlags() {
RootCmd.PersistentFlags().StringVarP(&opts.DockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.")
RootCmd.PersistentFlags().StringVarP(&opts.SrcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.")
RootCmd.PersistentFlags().StringVarP(&opts.Bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
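The parameter could be dropped because the function registers every flag on the package-level `RootCmd` and never used its argument; a minimal, self-contained sketch of that pattern (illustrative, not the actual kaniko source):

```go
package cmd

import "github.com/spf13/cobra"

var (
	// RootCmd is the package-level command that all flags attach to.
	RootCmd        = &cobra.Command{Use: "executor"}
	dockerfilePath string
)

func init() {
	addKanikoOptionsFlags()
}

// addKanikoOptionsFlags registers flags directly on RootCmd, which is why a
// *cobra.Command parameter would be unused here.
func addKanikoOptionsFlags() {
	RootCmd.PersistentFlags().StringVarP(&dockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.")
}
```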

View File

@@ -36,8 +36,8 @@ var (
func init() {
RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic)")
addKanikoOptionsFlags(RootCmd)
addHiddenFlags(RootCmd)
addKanikoOptionsFlags()
addHiddenFlags()
}
var RootCmd = &cobra.Command{
@@ -65,7 +65,7 @@ var RootCmd = &cobra.Command{
}
// addKanikoOptionsFlags configures opts
func addKanikoOptionsFlags(cmd *cobra.Command) {
func addKanikoOptionsFlags() {
RootCmd.PersistentFlags().VarP(&opts.Images, "image", "i", "Image to cache. Set it repeatedly for multiple images.")
RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "c", "/cache", "Directory of the cache.")
RootCmd.PersistentFlags().BoolVarP(&opts.Force, "force", "f", false, "Force cache overwriting.")
@@ -73,7 +73,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) {
}
// addHiddenFlags marks certain flags as hidden from the executor help text
func addHiddenFlags(cmd *cobra.Command) {
func addHiddenFlags() {
RootCmd.PersistentFlags().MarkHidden("azure-container-registry-config")
}

go.mod (new file): 118 lines
View File

@@ -0,0 +1,118 @@
module github.com/GoogleContainerTools/kaniko
go 1.13
require (
cloud.google.com/go v0.25.0
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible // indirect
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Azure/go-autorest v10.15.0+incompatible // indirect
github.com/Microsoft/go-winio v0.4.9 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect
github.com/aws/aws-sdk-go v1.25.19
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/containerd/containerd v1.1.2 // indirect
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 // indirect
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 // indirect
github.com/coreos/etcd v3.3.9+incompatible // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible // indirect
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
github.com/docker/go-units v0.3.3 // indirect
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 // indirect
github.com/emirpasic/gods v1.9.0 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/fsnotify/fsnotify v1.4.7 // indirect
github.com/genuinetools/amicontained v0.4.3
github.com/ghodss/yaml v1.0.0 // indirect
github.com/gliderlabs/ssh v0.2.2 // indirect
github.com/go-ini/ini v1.38.1 // indirect
github.com/gogo/protobuf v1.1.1 // indirect
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/protobuf v1.1.0 // indirect
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a // indirect
github.com/google/go-cmp v0.2.0
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
github.com/google/martian v2.1.0+incompatible // indirect
github.com/googleapis/gax-go v2.0.0+incompatible // indirect
github.com/googleapis/gnostic v0.2.0 // indirect
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be // indirect
github.com/karrick/godirwalk v1.7.7
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e // indirect
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
github.com/mattn/go-shellwords v1.0.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/minio/highwayhash v1.0.0
github.com/mitchellh/go-homedir v1.0.0 // indirect
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v1.0.0-rc5 // indirect
github.com/opencontainers/runtime-spec v1.0.1 // indirect
github.com/opencontainers/selinux v1.0.0-rc1 // indirect
github.com/opentracing/opentracing-go v1.0.2 // indirect
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1
github.com/otiai10/mint v1.3.0 // indirect
github.com/pborman/uuid v1.2.0 // indirect
github.com/pelletier/go-buffruneio v0.2.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.8.0
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd // indirect
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 // indirect
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
github.com/sergi/go-diff v1.0.0 // indirect
github.com/sirupsen/logrus v1.4.2
github.com/smartystreets/goconvey v1.6.4 // indirect
github.com/spf13/afero v1.2.1
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.1
github.com/src-d/gcfg v1.3.0 // indirect
github.com/stretchr/testify v1.4.0 // indirect
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e // indirect
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a // indirect
github.com/vbatts/tar-split v0.10.2 // indirect
github.com/xanzy/ssh-agent v0.2.0 // indirect
go.opencensus.io v0.14.0 // indirect
golang.org/x/net v0.0.0-20190311183353-d8887717615a
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 // indirect
google.golang.org/appengine v1.1.0 // indirect
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 // indirect
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.51.0 // indirect
gopkg.in/src-d/go-billy.v4 v4.2.0 // indirect
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 // indirect
gopkg.in/src-d/go-git.v4 v4.6.0
gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools v2.2.0+incompatible // indirect
k8s.io/api v0.0.0-20180711052118-183f3326a935 // indirect
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d // indirect
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 // indirect
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a // indirect
k8s.io/kubernetes v1.11.1 // indirect
)

go.sum (new file): 311 lines
View File

@@ -0,0 +1,311 @@
cloud.google.com/go v0.25.0 h1:6vD6xZTc8Jo6To8gHxFDRVsMvWFDgY3rugNszcDalN8=
cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible h1:ysqLW+tqZjJWOTE74heH/pDRbr4vlN3yV+dqQYgpyxw=
github.com/Azure/azure-sdk-for-go v19.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.15.0+incompatible h1:GqDO/9r+7tmkU8HI/DNLVkeucncU8jCul1DLeTaA3GI=
github.com/Azure/go-autorest v10.15.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ=
github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aws/aws-sdk-go v1.15.2 h1:RH309yOKW4FwEHbRmTsY/bVP8RlldMxBvu3u74wnTVc=
github.com/aws/aws-sdk-go v1.15.2/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.25.19 h1:sp3xP91qIAVhWufyn9qM6Zhhn6kX06WJQcmhRj7QTXc=
github.com/aws/aws-sdk-go v1.25.19/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/containerd/containerd v1.1.2 h1:gojrlFOL/4A5zP4BPgK3YrCaVIgAIHZfqGco/+mfJOs=
github.com/containerd/containerd v1.1.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720 h1:T5LkgEMACPq7+VPRnkh51WhyV+Q0waOGGtp37Y82org=
github.com/containerd/continuity v0.0.0-20180712174259-0377f7d76720/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260 h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/coreos/etcd v3.3.9+incompatible h1:iKSVPXGNGqroBx4+RmUXv8emeU7y+ucRZSzTYgzLZwM=
github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible h1:8hsJ081BmnqE+fkFjgasY9S8+otMhlWxHcKSYcKzrpk=
github.com/docker/distribution v2.6.0-rc.1.0.20180720172123-0dae0957e5fe+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197 h1:7X3lPJrEEhoUt1UnISqyUB4phKf9aAKVMdFXD63DJO8=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 h1:yWHOI+vFjEsAakUTSrtqc/SAHrhSkmn48pqjidZX3QA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 h1:ASmFyV8Sc6XrH1ng0TBPpYLspA8b7qRT84IbrQY1jSY=
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8/go.mod h1:n3Z4lIEl7g261ptkGDBcYi/3qBMDl9csaAhwi2MPejs=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/genuinetools/amicontained v0.4.3 h1:cqq9XiAHfWWY3dk8VU8bSJFu9yh8Il5coEdeTAPq72o=
github.com/genuinetools/amicontained v0.4.3/go.mod h1:PAMZkg9CcUTa6gNyULQ6tOMTMEb2HTKJufvKeFqDw+o=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.38.1 h1:hbtfM8emWUVo9GnXSloXYyFbXxZ+tG6sbepSStoe1FY=
github.com/go-ini/ini v1.38.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.1.0 h1:0iH4Ffd/meGoXqF2lSAhZHt8X+cPgkfn/cb6Cce5Vpc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a h1:ZJu5NB1Bk5ms4vw0Xu4i+jD32SE9jQXyfnOvwhHqlT0=
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111 h1:5F39eE4QsUnAd6iGzt1/zBs3dhX877U2hJyOJHFmQF0=
github.com/google/go-containerregistry v0.0.0-20190820205713-31e00cede111/go.mod h1:yZAFP63pRshzrEYLXLGPmUt0Ay+2zdjmMN1loCnRLUk=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8=
github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw=
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 h1:yxxFgVz31vFoKKTtRUNbXLNe4GFnbLKqg+0N7yG42L8=
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/karrick/godirwalk v1.7.7 h1:lLkPCA+C0u1pI4fLFseaupvh5/THlPJIqSPmnGGViKs=
github.com/karrick/godirwalk v1.7.7/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-shellwords v1.0.3 h1:K/VxK7SZ+cvuPgFSLKi5QPI9Vr/ipOf4C1gN+ntueUk=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/minio/highwayhash v1.0.0 h1:iMSDhgUILCr0TNm8LWlSjF8N0ZIj2qbO8WHp6Q/J2BA=
github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc=
github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75 h1:xlqeydzXCHEsRtsDyT0rvOBF7t3iTxNSG8gTQk2rfms=
github.com/moby/buildkit v0.0.0-20180731175856-e57eed420c75/go.mod h1:nnELdKPRkUAQR6pAB3mRU3+IlbqL3SSaAWqQL8k/K+4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c h1:Hww8mOyEKTeON4bZn7FrlLismspbPc1teNRUVH7wLQ8=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c h1:eSfnfIuwhxZyULg1NNuZycJcYkjYVGYe7FczwQReM6U=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.0.0-rc5 h1:rYjdzMDXVly2Av0RLs3nf/iVkaWh2UrDhuTdTT2KggQ=
github.com/opencontainers/runc v1.0.0-rc5/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.0.0-rc1 h1:Q70KvmpJSrYzryl/d0tC3vWUiTn23cSdStKodlokEPs=
github.com/opencontainers/selinux v1.0.0-rc1/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1 h1:A7kMXwDPBTfIVRv2l6XV3U6Su3SzLUzZjxnDDQVZDIY=
github.com/otiai10/copy v0.0.0-20180813032824-7e9a647135a1/go.mod h1:pXzZSDlN+HPzSdyIBnKNN9ptD9Hx7iZMWIJPTwo4FPE=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95 h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/mint v1.3.0 h1:Ady6MKVezQwHBkGzLFbrsywyp09Ah7rkmfjV3Bcr5uc=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd h1:R18uzd3mmBHD1ZrNQl83EcOdkKZ5byMwbsKuTxbNeIo=
github.com/prometheus/client_golang v0.9.0-pre1.0.20180210140205-a40133b69fbd/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc=
github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e h1:QjF5rxNgRSLHJDwKUvfYP3qOx1vTDzUi/+oSC8FXnCI=
github.com/syndtr/gocapability v0.0.0-20180223013746-33e07d32887e/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a h1:qd/ItJwqOuirA+l39OBtUOLokgzKyqKsHMTdzHpoWFk=
github.com/tonistiigi/fsutil v0.0.0-20180725061210-b19464cd1b6a/go.mod h1:eden9dLzAAuNQ0L7whFr6/Mzgz6btsvQpUnxOOI+CCE=
github.com/vbatts/tar-split v0.10.2 h1:CXd7HEKGkTLjBMinpObcJZU5Hm8EKlor2a1JtX6msXQ=
github.com/vbatts/tar-split v0.10.2/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
go.opencensus.io v0.14.0 h1:1eTLxqxSIAylcKoxnNkdhvvBNZDA8JwkKNXxgyma0IA=
go.opencensus.io v0.14.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc h1:3ElrZeO6IBP+M8kgu5YFwRo92Gqr+zBg3aooYQ6ziqU=
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564 h1:o6ENHFwwr1TZ9CUPQcfo1HGvLP1OPsPOTB7xCIOPNmU=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79 h1:wCy2/9bhO1JeP2zZUALrj7ZdZuZoR4mRV57kTxjqRpo=
google.golang.org/api v0.0.0-20180730000901-31ca0e01cd79/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5 h1:2PjFmwzH/sxgW9CRJDlEiwMHO8rOk1eMDzVL14HC1e4=
google.golang.org/genproto v0.0.0-20180731170733-daca94659cb5/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76 h1:vTe0EiECbrLSj5+6SXMi+eWwYW/bjmd26LE5lv52YVs=
google.golang.org/grpc v1.2.1-0.20180320012744-8124abf74e76/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/src-d/go-billy.v4 v4.2.0 h1:VGbrP1EsYxtvVPEiHui+4//imr4E5MGEFLx66bQtusg=
gopkg.in/src-d/go-billy.v4 v4.2.0/go.mod h1:ZHSF0JP+7oD97194otDUCD7Ofbk63+xFcfWP5bT6h+Q=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=
gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.6.0 h1:3XrA9Qxiwfj7Iusd7dVYUqxMjJYPsLuBdUeQbwnL/NQ=
gopkg.in/src-d/go-git.v4 v4.6.0/go.mod h1:CzbUWqMn4pvmvndg3gnh5iZFmSsbhyhUWdI0IQ60AQo=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
k8s.io/api v0.0.0-20180711052118-183f3326a935 h1:K2hsi4kfw1BvCEKX5J1A51pQtXjico7lOEmSiIHQ3/E=
k8s.io/api v0.0.0-20180711052118-183f3326a935/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4=
k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137 h1:4DIWGqvAjLME47asVwjb14H+6bDRu+4h43Ssw6tMAHc=
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.11.1 h1:wHOPX+teuYaSlUWfL/b24jMH0n7HECbj4Xt8i7kSZIw=
k8s.io/kubernetes v1.11.1/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View File

@@ -1,71 +0,0 @@
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e -o pipefail
DEP_VERSION=v0.5.0
DEP_DIR="$GOPATH/src/github.com/golang/dep"
install_dep() {
echo "Installing dep ${DEP_VERSION}"
if [ ! -d "$DEP_DIR" ]; then
go get -u -d github.com/golang/dep/cmd/dep
fi
cd $DEP_DIR
git checkout -q ${DEP_VERSION}
go build -o $GOPATH/bin/dep github.com/golang/dep/cmd/dep
}
if [ -z "$VALIDATE_UPSTREAM" ]; then
VALIDATE_REPO='git@github.com:GoogleContainerTools/kaniko.git'
VALIDATE_BRANCH='master'
VALIDATE_HEAD="$(git rev-parse --verify HEAD)"
git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH"
VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)"
VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD"
validate_diff() {
if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then
git diff "$VALIDATE_COMMIT_DIFF" "$@"
fi
}
fi
# See if there have been upstream changes
IFS=$'\n'
files=( $(validate_diff --name-only -- 'Gopkg.toml' 'Gopkg.lock' 'vendor/' || true) )
unset IFS
if [ ${#files[@]} -gt 0 ]; then
if ! [ -x "$(command -v dep)" ]; then
install_dep
fi
dep ensure
diffs="$(git status --porcelain -- vendor Gopkg.toml Gopkg.lock 2>/dev/null)"
if [ "$diffs" ]; then
{
echo 'Vendor not reproducible, please commit these changes to fix.'
echo
echo "$diffs"
} >&2
false
fi
else
echo 'No vendor changes from upstream. Skipping dep ensure, dep prune'
fi

View File

@ -20,7 +20,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if ! [ -x "$(command -v golangci-lint)" ]; then
echo "Installing GolangCI-Lint"
${DIR}/install_golint.sh -b $GOPATH/bin v1.9.3
${DIR}/install_golint.sh -b $GOPATH/bin v1.21.0
fi
golangci-lint run

View File

@ -32,7 +32,6 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"golang.org/x/sync/errgroup"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
@ -42,40 +41,6 @@ import (
var config = initGCPConfig()
var imageBuilder *DockerFileBuilder
type gcpConfig struct {
gcsBucket string
imageRepo string
onbuildBaseImage string
hardlinkBaseImage string
}
type imageDetails struct {
name string
numLayers int
digest string
}
func (i imageDetails) String() string {
return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers)
}
func initGCPConfig() *gcpConfig {
var c gcpConfig
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to upload the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo.")
flag.Parse()
if c.gcsBucket == "" || c.imageRepo == "" {
log.Fatalf("You must provide a gcs bucket (\"%s\" was provided) and a docker repo (\"%s\" was provided)", c.gcsBucket, c.imageRepo)
}
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
return &c
}
const (
daemonPrefix = "daemon://"
dockerfilesPath = "dockerfiles"
@ -102,19 +67,6 @@ const (
]`
)
func meetsRequirements() bool {
requiredTools := []string{"container-diff", "gsutil"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)
if err != nil {
fmt.Printf("You must have %s installed and on your PATH\n", tool)
hasRequirements = false
}
}
return hasRequirements
}
func TestMain(m *testing.M) {
if !meetsRequirements() {
fmt.Println("Missing required tools")
@ -192,19 +144,9 @@ func TestMain(m *testing.M) {
}
imageBuilder = NewDockerFileBuilder(dockerfiles)
g := errgroup.Group{}
for dockerfile := range imageBuilder.FilesBuilt {
df := dockerfile
g.Go(func() error {
return imageBuilder.BuildImage(config.imageRepo, config.gcsBucket, dockerfilesPath, df)
})
}
if err := g.Wait(); err != nil {
fmt.Printf("Error building images: %s", err)
os.Exit(1)
}
os.Exit(m.Run())
}
func TestRun(t *testing.T) {
for dockerfile := range imageBuilder.FilesBuilt {
t.Run("test_"+dockerfile, func(t *testing.T) {
@ -216,6 +158,10 @@ func TestRun(t *testing.T) {
if _, ok := imageBuilder.TestCacheDockerfiles[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
imageBuilder.FilesBuilt[dockerfile] = true
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
@ -333,10 +279,15 @@ func TestLayers(t *testing.T) {
for dockerfile := range imageBuilder.FilesBuilt {
t.Run("test_layer_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
imageBuilder.FilesBuilt[dockerfile] = true
// Pull the kaniko image
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
@ -352,6 +303,21 @@ func TestLayers(t *testing.T) {
}
}
func buildImage(t *testing.T, dockerfile string, imageBuilder *DockerFileBuilder) {
if imageBuilder.FilesBuilt[dockerfile] {
return
}
if err := imageBuilder.BuildImage(
config.imageRepo, config.gcsBucket, dockerfilesPath, dockerfile,
); err != nil {
t.Errorf("Error building image: %s", err)
t.FailNow()
}
}
// Build each image with kaniko twice, and then make sure they're exactly the same
func TestCache(t *testing.T) {
populateVolumeCache()
@ -526,3 +492,50 @@ func logBenchmarks(benchmark string) error {
}
return nil
}
type gcpConfig struct {
gcsBucket string
imageRepo string
onbuildBaseImage string
hardlinkBaseImage string
}
type imageDetails struct {
name string
numLayers int
digest string
}
func (i imageDetails) String() string {
return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers)
}
func initGCPConfig() *gcpConfig {
var c gcpConfig
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to upload the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo.")
flag.Parse()
if c.gcsBucket == "" || c.imageRepo == "" {
log.Fatalf("You must provide a gcs bucket (\"%s\" was provided) and a docker repo (\"%s\" was provided)", c.gcsBucket, c.imageRepo)
}
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
return &c
}
func meetsRequirements() bool {
requiredTools := []string{"container-diff", "gsutil"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)
if err != nil {
fmt.Printf("You must have %s installed and on your PATH\n", tool)
hasRequirements = false
}
}
return hasRequirements
}

2
pkg/cache/cache.go vendored
View File

@ -113,7 +113,7 @@ func Destination(opts *config.KanikoOptions, cacheKey string) (string, error) {
return fmt.Sprintf("%s:%s", cache, cacheKey), nil
}
// LocalSource retieves a source image from a local cache given cacheKey
// LocalSource retrieves a source image from a local cache given cacheKey
func LocalSource(opts *config.CacheOptions, cacheKey string) (v1.Image, error) {
cache := opts.CacheDir
if cache == "" {

View File

@ -17,6 +17,7 @@ limitations under the License.
package commands
import (
"fmt"
"os"
"path/filepath"
@ -120,24 +121,7 @@ func (c *CopyCommand) String() string {
}
func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) {
// We don't use the context if we're performing a copy --from.
if c.cmd.From != "" {
return nil, nil
}
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
srcs, _, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
if err != nil {
return nil, err
}
files := []string{}
for _, src := range srcs {
fullPath := filepath.Join(c.buildcontext, src)
files = append(files, fullPath)
}
logrus.Infof("Using files from context: %v", files)
return files, nil
return copyCmdFilesUsedFromContext(config, buildArgs, c.cmd, c.buildcontext)
}
func (c *CopyCommand) MetadataOnly() bool {
@ -156,37 +140,62 @@ func (c *CopyCommand) ShouldCacheOutput() bool {
func (c *CopyCommand) CacheCommand(img v1.Image) DockerCommand {
return &CachingCopyCommand{
img: img,
cmd: c.cmd,
img: img,
cmd: c.cmd,
buildcontext: c.buildcontext,
extractFn: util.ExtractFile,
}
}
func (c *CopyCommand) From() string {
return c.cmd.From
}
type CachingCopyCommand struct {
BaseCommand
img v1.Image
extractedFiles []string
cmd *instructions.CopyCommand
buildcontext string
extractFn util.ExtractFunction
}
func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Infof("Found cached layer, extracting to filesystem")
var err error
cr.extractedFiles, err = util.GetFSFromImage(RootDir, cr.img)
if cr.img == nil {
return fmt.Errorf("cached command image is nil %v", cr.String())
}
cr.extractedFiles, err = util.GetFSFromImage(RootDir, cr.img, cr.extractFn)
logrus.Infof("extractedFiles: %s", cr.extractedFiles)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}
return nil
}
func (cr *CachingCopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) {
return copyCmdFilesUsedFromContext(config, buildArgs, cr.cmd, cr.buildcontext)
}
func (cr *CachingCopyCommand) FilesToSnapshot() []string {
return cr.extractedFiles
}
func (cr *CachingCopyCommand) String() string {
if cr.cmd == nil {
return "nil command"
}
return cr.cmd.String()
}
func (cr *CachingCopyCommand) From() string {
return cr.cmd.From
}
func resolveIfSymlink(destPath string) (string, error) {
baseDir := filepath.Dir(destPath)
if info, err := os.Lstat(baseDir); err == nil {
@ -204,3 +213,32 @@ func resolveIfSymlink(destPath string) (string, error) {
}
return destPath, nil
}
func copyCmdFilesUsedFromContext(
config *v1.Config, buildArgs *dockerfile.BuildArgs, cmd *instructions.CopyCommand,
buildcontext string,
) ([]string, error) {
// We don't use the context if we're performing a copy --from.
if cmd.From != "" {
return nil, nil
}
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
srcs, _, err := util.ResolveEnvAndWildcards(
cmd.SourcesAndDest, buildcontext, replacementEnvs,
)
if err != nil {
return nil, err
}
files := []string{}
for _, src := range srcs {
fullPath := filepath.Join(buildcontext, src)
files = append(files, fullPath)
}
logrus.Debugf("Using files from context: %v", files)
return files, nil
}

View File

@ -16,6 +16,7 @@ limitations under the License.
package commands
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
@ -215,3 +216,151 @@ func Test_resolveIfSymlink(t *testing.T) {
})
}
}
func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) {
tempDir := setupTestTemp()
tarContent, err := prepareTarFixture([]string{"foo.txt"})
if err != nil {
t.Errorf("couldn't prepare tar fixture %v", err)
}
config := &v1.Config{}
buildArgs := &dockerfile.BuildArgs{}
type testCase struct {
description string
expectLayer bool
expectErr bool
count *int
expectedCount int
command *CachingCopyCommand
extractedFiles []string
contextFiles []string
}
testCases := []testCase{
func() testCase {
err = ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("meow"), 0644)
if err != nil {
t.Errorf("couldn't write tempfile %v", err)
t.FailNow()
}
c := &CachingCopyCommand{
img: fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{TarContent: tarContent},
},
},
buildcontext: tempDir,
cmd: &instructions.CopyCommand{
SourcesAndDest: []string{
"foo.txt", "foo.txt",
},
},
}
count := 0
tc := testCase{
description: "with valid image and valid layer",
count: &count,
expectedCount: 1,
expectLayer: true,
extractedFiles: []string{"/foo.txt"},
contextFiles: []string{"foo.txt"},
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
*tc.count++
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingCopyCommand{}
tc := testCase{
description: "with no image",
expectErr: true,
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingCopyCommand{
img: fakeImage{},
}
tc := testCase{
description: "with image containing no layers",
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingCopyCommand{
img: fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{},
},
},
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc := testCase{
description: "with image with one layer that has no tar content",
expectErr: false, // this one probably should fail but doesn't because of how ExecuteCommand and util.GetFSFromLayers are implemented - cvgw- 2019-11-25
expectLayer: true,
}
tc.command = c
return tc
}(),
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
c := tc.command
err := c.ExecuteCommand(config, buildArgs)
if !tc.expectErr && err != nil {
t.Errorf("Expected err to be nil but was %v", err)
} else if tc.expectErr && err == nil {
t.Error("Expected err but was nil")
}
if tc.count != nil {
if *tc.count != tc.expectedCount {
t.Errorf("Expected extractFn to be called %v times but was called %v times", tc.expectedCount, *tc.count)
}
for _, file := range tc.extractedFiles {
match := false
cFiles := c.FilesToSnapshot()
for _, cFile := range cFiles {
if file == cFile {
match = true
break
}
}
if !match {
t.Errorf("Expected extracted files to include %v but did not %v", file, cFiles)
}
}
cmdFiles, err := c.FilesUsedFromContext(
config, buildArgs,
)
if err != nil {
t.Errorf("failed to get files used from context from command %v", err)
}
if len(cmdFiles) != len(tc.contextFiles) {
t.Errorf("expected files used from context to equal %v but was %v", tc.contextFiles, cmdFiles)
}
}
})
}
}

View File

@ -0,0 +1,88 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// used for testing in the commands package
package commands
import (
"bytes"
"io"
"io/ioutil"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type fakeLayer struct {
TarContent []byte
}
func (f fakeLayer) Digest() (v1.Hash, error) {
return v1.Hash{}, nil
}
func (f fakeLayer) DiffID() (v1.Hash, error) {
return v1.Hash{}, nil
}
func (f fakeLayer) Compressed() (io.ReadCloser, error) {
return nil, nil
}
func (f fakeLayer) Uncompressed() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(f.TarContent)), nil
}
func (f fakeLayer) Size() (int64, error) {
return 0, nil
}
func (f fakeLayer) MediaType() (types.MediaType, error) {
return "", nil
}
type fakeImage struct {
ImageLayers []v1.Layer
}
func (f fakeImage) Layers() ([]v1.Layer, error) {
return f.ImageLayers, nil
}
func (f fakeImage) MediaType() (types.MediaType, error) {
return "", nil
}
func (f fakeImage) Size() (int64, error) {
return 0, nil
}
func (f fakeImage) ConfigName() (v1.Hash, error) {
return v1.Hash{}, nil
}
func (f fakeImage) ConfigFile() (*v1.ConfigFile, error) {
return &v1.ConfigFile{}, nil
}
func (f fakeImage) RawConfigFile() ([]byte, error) {
return []byte{}, nil
}
func (f fakeImage) Digest() (v1.Hash, error) {
return v1.Hash{}, nil
}
func (f fakeImage) Manifest() (*v1.Manifest, error) {
return &v1.Manifest{}, nil
}
func (f fakeImage) RawManifest() ([]byte, error) {
return []byte{}, nil
}
func (f fakeImage) LayerByDigest(v1.Hash) (v1.Layer, error) {
return fakeLayer{}, nil
}
func (f fakeImage) LayerByDiffID(v1.Hash) (v1.Layer, error) {
return fakeLayer{}, nil
}

View File

@ -164,8 +164,9 @@ func (r *RunCommand) FilesToSnapshot() []string {
func (r *RunCommand) CacheCommand(img v1.Image) DockerCommand {
return &CachingRunCommand{
img: img,
cmd: r.cmd,
img: img,
cmd: r.cmd,
extractFn: util.ExtractFile,
}
}
@ -186,15 +187,21 @@ type CachingRunCommand struct {
img v1.Image
extractedFiles []string
cmd *instructions.RunCommand
extractFn util.ExtractFunction
}
func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Infof("Found cached layer, extracting to filesystem")
var err error
cr.extractedFiles, err = util.GetFSFromImage(constants.RootDir, cr.img)
if cr.img == nil {
return fmt.Errorf("command image is nil %v", cr.String())
}
cr.extractedFiles, err = util.GetFSFromImage(constants.RootDir, cr.img, cr.extractFn)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}
return nil
}
@ -203,5 +210,8 @@ func (cr *CachingRunCommand) FilesToSnapshot() []string {
}
func (cr *CachingRunCommand) String() string {
if cr.cmd == nil {
return "nil command"
}
return cr.cmd.String()
}

View File

@ -16,10 +16,19 @@ limitations under the License.
package commands
import (
"archive/tar"
"bytes"
"io"
"io/ioutil"
"log"
"os"
"os/user"
"path/filepath"
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
func Test_addDefaultHOME(t *testing.T) {
@ -120,3 +129,187 @@ func Test_addDefaultHOME(t *testing.T) {
})
}
}
func prepareTarFixture(fileNames []string) ([]byte, error) {
dir, err := ioutil.TempDir("/tmp", "tar-fixture")
if err != nil {
return nil, err
}
content := `
Meow meow meow meow
meow meow meow meow
`
for _, name := range fileNames {
if err := ioutil.WriteFile(filepath.Join(dir, name), []byte(content), 0777); err != nil {
return nil, err
}
}
writer := bytes.NewBuffer([]byte{})
tw := tar.NewWriter(writer)
defer tw.Close()
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
hdr, err := tar.FileInfoHeader(info, "")
if err != nil {
return err
}
if err := tw.WriteHeader(hdr); err != nil {
log.Fatal(err)
}
body, err := ioutil.ReadFile(path)
if err != nil {
return err
}
if _, err := tw.Write(body); err != nil {
log.Fatal(err)
}
return nil
})
return writer.Bytes(), nil
}
func Test_CachingRunCommand_ExecuteCommand(t *testing.T) {
tarContent, err := prepareTarFixture([]string{"foo.txt"})
if err != nil {
t.Errorf("couldn't prepare tar fixture %v", err)
}
config := &v1.Config{}
buildArgs := &dockerfile.BuildArgs{}
type testCase struct {
description string
expectLayer bool
expectErr bool
count *int
expectedCount int
command *CachingRunCommand
extractedFiles []string
contextFiles []string
}
testCases := []testCase{
func() testCase {
c := &CachingRunCommand{
img: fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{TarContent: tarContent},
},
},
}
count := 0
tc := testCase{
description: "with valid image and valid layer",
count: &count,
expectedCount: 1,
expectLayer: true,
extractedFiles: []string{"/foo.txt"},
contextFiles: []string{"foo.txt"},
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
*tc.count++
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingRunCommand{}
tc := testCase{
description: "with no image",
expectErr: true,
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingRunCommand{
img: fakeImage{},
}
tc := testCase{
description: "with image containing no layers",
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc.command = c
return tc
}(),
func() testCase {
c := &CachingRunCommand{
img: fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{},
},
},
}
c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error {
return nil
}
tc := testCase{
description: "with image with one layer that has no tar content",
expectErr: false, // this one probably should fail but doesn't because of how ExecuteCommand and util.GetFSFromLayers are implemented - cvgw- 2019-11-25
expectLayer: true,
}
tc.command = c
return tc
}(),
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
c := tc.command
err := c.ExecuteCommand(config, buildArgs)
if !tc.expectErr && err != nil {
t.Errorf("Expected err to be nil but was %v", err)
} else if tc.expectErr && err == nil {
t.Error("Expected err but was nil")
}
if tc.count != nil {
if *tc.count != tc.expectedCount {
t.Errorf("Expected extractFn to be called %v times but was called %v times", tc.expectedCount, *tc.count)
}
for _, file := range tc.extractedFiles {
match := false
cmdFiles := c.extractedFiles
for _, f := range cmdFiles {
if file == f {
match = true
break
}
}
if !match {
t.Errorf("Expected extracted files to include %v but did not %v", file, cmdFiles)
}
}
// CachingRunCommand does not override BaseCommand
// FilesUseFromContext so this will always return an empty slice and no error
// This seems like it might be a bug as it results in RunCommands and CachingRunCommands generating different cache keys - cvgw - 2019-11-27
cmdFiles, err := c.FilesUsedFromContext(
config, buildArgs,
)
if err != nil {
t.Errorf("failed to get files used from context from command %v", err)
}
if len(cmdFiles) != 0 {
t.Errorf("expected files used from context to be empty but was not")
}
}
})
}
}

View File

@ -18,7 +18,7 @@ package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -19,7 +19,7 @@ import (
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -20,7 +20,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/pkg/signal"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)

View File

@ -20,7 +20,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -21,7 +21,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)

View File

@ -21,7 +21,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -23,7 +23,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)

View File

@ -21,7 +21,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -23,7 +23,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)

View File

@ -22,7 +22,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)

View File

@ -16,7 +16,9 @@ limitations under the License.
package config
import "github.com/moby/buildkit/frontend/dockerfile/instructions"
import (
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)
// KanikoStage wraps a stage of the Dockerfile and provides extra information
type KanikoStage struct {

View File

@ -61,23 +61,24 @@ type snapShotter interface {
// stageBuilder contains all fields necessary to build one stage of a Dockerfile
type stageBuilder struct {
stage config.KanikoStage
image v1.Image
cf *v1.ConfigFile
snapshotter snapShotter
layerCache cache.LayerCache
pushCache cachePusher
baseImageDigest string
finalCacheKey string
opts *config.KanikoOptions
cmds []commands.DockerCommand
args *dockerfile.BuildArgs
crossStageDeps map[int][]string
digestToCacheKeyMap map[string]string
stage config.KanikoStage
image v1.Image
cf *v1.ConfigFile
baseImageDigest string
finalCacheKey string
opts *config.KanikoOptions
cmds []commands.DockerCommand
args *dockerfile.BuildArgs
crossStageDeps map[int][]string
digestToCacheKey map[string]string
stageIdxToDigest map[string]string
snapshotter snapShotter
layerCache cache.LayerCache
pushCache cachePusher
}
// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string) (*stageBuilder, error) {
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string, sid map[string]string) (*stageBuilder, error) {
sourceImage, err := util.RetrieveSourceImage(stage, opts)
if err != nil {
return nil, err
@ -104,14 +105,15 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
return nil, err
}
s := &stageBuilder{
stage: stage,
image: sourceImage,
cf: imageConfig,
snapshotter: snapshotter,
baseImageDigest: digest.String(),
opts: opts,
crossStageDeps: crossStageDeps,
digestToCacheKeyMap: dcm,
stage: stage,
image: sourceImage,
cf: imageConfig,
snapshotter: snapshotter,
baseImageDigest: digest.String(),
opts: opts,
crossStageDeps: crossStageDeps,
digestToCacheKey: dcm,
stageIdxToDigest: sid,
layerCache: &cache.RegistryCache{
Opts: opts,
},
@ -146,6 +148,40 @@ func initializeConfig(img partial.WithConfigFile) (*v1.ConfigFile, error) {
return imageConfig, nil
}
func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache) (CompositeCache, error) {
// Add the next command to the cache key.
compositeKey.AddKey(command.String())
switch v := command.(type) {
case *commands.CopyCommand:
compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
case *commands.CachingCopyCommand:
compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return compositeKey, err
}
}
return compositeKey, nil
}
func (s *stageBuilder) populateCopyCmdCompositeKey(command fmt.Stringer, from string, compositeKey CompositeCache) CompositeCache {
if from != "" {
digest, ok := s.stageIdxToDigest[from]
if ok {
ds := digest
cacheKey, ok := s.digestToCacheKey[ds]
if ok {
logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
compositeKey.AddKey(cacheKey)
}
}
}
return compositeKey
}
func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) error {
if !s.opts.Cache {
return nil
@ -160,25 +196,28 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
if command == nil {
continue
}
compositeKey.AddKey(command.String())
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&cfg, s.args)
if err != nil {
return errors.Wrap(err, "failed to get files used from context")
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return errors.Wrap(err, "failed to add path to composite key")
}
compositeKey, err = s.populateCompositeKey(command, files, compositeKey)
if err != nil {
return err
}
logrus.Debugf("optimize: composite key for command %v %v", command.String(), compositeKey)
ck, err := compositeKey.Hash()
if err != nil {
return errors.Wrap(err, "failed to hash composite key")
}
logrus.Debugf("optimize: cache key for command %v %v", command.String(), ck)
s.finalCacheKey = ck
if command.ShouldCacheOutput() && !stopCache {
img, err := s.layerCache.RetrieveLayer(ck)
if err != nil {
logrus.Debugf("Failed to retrieve layer: %s", err)
logrus.Infof("No cached layer found for cmd %s", command.String())
@ -206,11 +245,12 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
func (s *stageBuilder) build() error {
// Set the initial cache key to be the base image digest, the build args and the SrcContext.
var compositeKey *CompositeCache
if cacheKey, ok := s.digestToCacheKeyMap[s.baseImageDigest]; ok {
if cacheKey, ok := s.digestToCacheKey[s.baseImageDigest]; ok {
compositeKey = NewCompositeCache(cacheKey)
} else {
compositeKey = NewCompositeCache(s.baseImageDigest)
}
compositeKey.AddKey(s.opts.BuildArgs...)
// Apply optimizations to the instructions.
@ -233,21 +273,26 @@ func (s *stageBuilder) build() error {
if shouldUnpack {
t := timing.Start("FS Unpacking")
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
if _, err := util.GetFSFromImage(constants.RootDir, s.image, util.ExtractFile); err != nil {
return errors.Wrap(err, "failed to get filesystem from image")
}
timing.DefaultRun.Stop(t)
} else {
logrus.Info("Skipping unpacking as no commands require it.")
}
if err := util.DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return errors.Wrap(err, "failed to check filesystem whitelist")
}
// Take initial snapshot
t := timing.Start("Initial FS snapshot")
if err := s.snapshotter.Init(); err != nil {
return err
}
timing.DefaultRun.Stop(t)
cacheGroup := errgroup.Group{}
@ -256,19 +301,19 @@ func (s *stageBuilder) build() error {
continue
}
// Add the next command to the cache key.
compositeKey.AddKey(command.String())
t := timing.Start("Command: " + command.String())
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&s.cf.Config, s.args)
if err != nil {
return errors.Wrap(err, "failed to get files used from context")
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to add path to composite key %v", f))
}
*compositeKey, err = s.populateCompositeKey(command, files, *compositeKey)
if err != nil {
return err
}
logrus.Info(command.String())
if err := command.ExecuteCommand(&s.cf.Config, s.args); err != nil {
@ -286,10 +331,14 @@ func (s *stageBuilder) build() error {
return errors.Wrap(err, "failed to take snapshot")
}
logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
ck, err := compositeKey.Hash()
if err != nil {
return errors.Wrap(err, "failed to hash composite key")
}
logrus.Debugf("build: cache key for command %v %v", command.String(), ck)
// Push layer to cache (in parallel) now along with new config file
if s.opts.Cache && command.ShouldCacheOutput() {
cacheGroup.Go(func() error {
@ -303,6 +352,7 @@ func (s *stageBuilder) build() error {
if err := cacheGroup.Wait(); err != nil {
logrus.Warnf("error uploading layer to cache: %s", err)
}
return nil
}
@ -374,7 +424,6 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) err
},
)
return err
}
func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) {
@ -442,8 +491,10 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error)
// DoBuild executes building the Dockerfile
func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
t := timing.Start("Total Build Time")
digestToCacheKeyMap := make(map[string]string)
// Parse dockerfile and unpack base image to root
digestToCacheKey := make(map[string]string)
stageIdxToDigest := make(map[string]string)
// Parse dockerfile
stages, err := dockerfile.Stages(opts)
if err != nil {
return nil, err
@ -463,23 +514,32 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
logrus.Infof("Built cross stage deps: %v", crossStageDependencies)
for index, stage := range stages {
sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKeyMap)
sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest)
if err != nil {
return nil, err
}
if err := sb.build(); err != nil {
return nil, errors.Wrap(err, "error building stage")
}
reviewConfig(stage, &sb.cf.Config)
sourceImage, err := mutate.Config(sb.image, sb.cf.Config)
if err != nil {
return nil, err
}
d, err := sourceImage.Digest()
if err != nil {
return nil, err
}
digestToCacheKeyMap[d.String()] = sb.finalCacheKey
stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String()
logrus.Debugf("mapping stage idx %v to digest %v", sb.stage.Index, d.String())
digestToCacheKey[d.String()] = sb.finalCacheKey
logrus.Debugf("mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey)
if stage.Final {
sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()})
if err != nil {
@ -593,7 +653,7 @@ func extractImageToDependencyDir(name string, image v1.Image) error {
return err
}
logrus.Debugf("trying to extract to %s", dependencyDir)
_, err := util.GetFSFromImage(dependencyDir, image)
_, err := util.GetFSFromImage(dependencyDir, image, util.ExtractFile)
return err
}
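
The `stageIdxToDigest` and `digestToCacheKey` maps introduced above are what allow a `COPY --from` to fold the source stage's final cache key into its own composite key. A toy sketch of that lookup (names and values are illustrative, not kaniko code):

```go
package main

import "fmt"

func main() {
	// stage index -> digest of the image built for that stage
	stageIdxToDigest := map[string]string{"0": "sha256:abc123"}
	// stage digest -> final composite cache key recorded for that stage
	digestToCacheKey := map[string]string{"sha256:abc123": "cachekey-0"}

	from := "0" // as in: COPY --from=0 /bin/app /app
	if digest, ok := stageIdxToDigest[from]; ok {
		if key, ok := digestToCacheKey[digest]; ok {
			// the source stage's key becomes part of this COPY's composite key,
			// so any change in stage 0 invalidates the COPY's cached layer
			fmt.Println("adding to composite key:", key)
		}
	}
}
```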

View File

@ -36,7 +36,6 @@ import (
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)
func Test_reviewConfig(t *testing.T) {
@ -620,7 +619,7 @@ func Test_stageBuilder_build(t *testing.T) {
ch := NewCompositeCache("", "")
ch.AddPath(filepath)
logrus.SetLevel(logrus.DebugLevel)
hash, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
@ -664,7 +663,7 @@ func Test_stageBuilder_build(t *testing.T) {
filePath := filepath.Join(dir, filename)
ch := NewCompositeCache("", "")
ch.AddPath(filePath)
logrus.SetLevel(logrus.DebugLevel)
hash, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
@ -698,22 +697,24 @@ func Test_stageBuilder_build(t *testing.T) {
dir, filenames := tempDirAndFile(t)
filename := filenames[0]
tarContent := generateTar(t, filename)
destDir, err := ioutil.TempDir("", "baz")
if err != nil {
t.Errorf("could not create temp dir %v", err)
}
filePath := filepath.Join(dir, filename)
ch := NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddPath(filePath)
logrus.SetLevel(logrus.DebugLevel)
logrus.Infof("test composite key %v", ch)
ch := NewCompositeCache("", "RUN foobar")
hash1, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
}
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
logrus.Infof("test composite key %v", ch)
hash2, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
@ -721,11 +722,78 @@ func Test_stageBuilder_build(t *testing.T) {
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
logrus.Infof("test composite key %v", ch)
hash3, err := ch.Hash()
image := fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{
TarContent: tarContent,
},
},
}
dockerFile := fmt.Sprintf(`
FROM ubuntu:16.04
RUN foobar
COPY %s bar.txt
`, filename)
f, _ := ioutil.TempFile("", "")
ioutil.WriteFile(f.Name(), []byte(dockerFile), 0755)
opts := &config.KanikoOptions{
DockerfilePath: f.Name(),
}
stages, err := dockerfile.Stages(opts)
if err != nil {
t.Errorf("could not parse test dockerfile")
}
stage := stages[0]
cmds := stage.Commands
return testcase{
description: "cached run command followed by uncached copy command results in consistent read and write hashes",
opts: &config.KanikoOptions{Cache: true},
rootDir: dir,
config: &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
layerCache: &fakeLayerCache{
keySequence: []string{hash1},
img: image,
},
image: image,
// hash1 is the read cachekey for the first layer
// hash2 is the read cachekey for the second layer
expectedCacheKeys: []string{hash1, hash2},
pushedCacheKeys: []string{hash2},
commands: getCommands(dir, cmds),
}
}(),
func() testcase {
dir, filenames := tempDirAndFile(t)
filename := filenames[0]
tarContent := generateTar(t, filename)
destDir, err := ioutil.TempDir("", "baz")
if err != nil {
t.Errorf("could not create temp dir %v", err)
}
filePath := filepath.Join(dir, filename)
ch := NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddPath(filePath)
hash1, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
}
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
hash2, err := ch.Hash()
if err != nil {
t.Errorf("couldn't create hash %v", err)
}
ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
ch.AddPath(filePath)
image := fakeImage{
ImageLayers: []v1.Layer{
fakeLayer{
@ -749,10 +817,12 @@ COPY %s bar.txt
if err != nil {
t.Errorf("could not parse test dockerfile")
}
stage := stages[0]
cmds := stage.Commands
return testcase{
description: "cached copy command followed by uncached copy command result in different read and write hashes",
description: "cached copy command followed by uncached copy command results in consistent read and write hashes",
opts: &config.KanikoOptions{Cache: true},
rootDir: dir,
config: &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
@ -764,10 +834,8 @@ COPY %s bar.txt
// hash1 is the read cachekey for the first layer
// hash2 is the read cachekey for the second layer
expectedCacheKeys: []string{hash1, hash2},
// Due to CachingCopyCommand and CopyCommand returning different values the write cache key for the second copy command will never match the read cache key
// hash3 is the cachekey used to write to the cache for layer 2
pushedCacheKeys: []string{hash3},
commands: getCommands(dir, cmds),
pushedCacheKeys: []string{hash2},
commands: getCommands(dir, cmds),
}
}(),
}
@ -848,6 +916,11 @@ func assertCacheKeys(t *testing.T, expectedCacheKeys, actualCacheKeys []string,
sort.Slice(actualCacheKeys, func(x, y int) bool {
return actualCacheKeys[x] > actualCacheKeys[y]
})
if len(expectedCacheKeys) != len(actualCacheKeys) {
t.Errorf("expected %v to equal %v", actualCacheKeys, expectedCacheKeys)
}
for i, key := range expectedCacheKeys {
if key != actualCacheKeys[i] {
t.Errorf("expected to %v keys %d to be %v but was %v %v", description, i, key, actualCacheKeys[i], actualCacheKeys)
@ -888,6 +961,7 @@ func tempDirAndFile(t *testing.T) (string, []string) {
return dir, filenames
}
func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
buf := bytes.NewBuffer([]byte{})
writer := tar.NewWriter(buf)

View File

@ -18,6 +18,7 @@ package executor
import (
"crypto/sha256"
"fmt"
"os"
"path/filepath"
"strings"
@ -75,7 +76,7 @@ func (s *CompositeCache) AddPath(p string) error {
return err
}
s.keys = append(s.keys, string(sha.Sum(nil)))
s.keys = append(s.keys, fmt.Sprintf("%x", sha.Sum(nil)))
return nil
}
@ -98,5 +99,5 @@ func HashDir(p string) (string, error) {
return "", err
}
return string(sha.Sum(nil)), nil
return fmt.Sprintf("%x", sha.Sum(nil)), nil
}
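
The move from `string(sha.Sum(nil))` to `fmt.Sprintf("%x", ...)` matters because the raw sum is arbitrary bytes; hex-encoding yields a stable, printable key. A minimal standalone illustration (not kaniko code):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sha := sha256.New()
	sha.Write([]byte("RUN apt-get update"))
	sum := sha.Sum(nil)
	fmt.Println(len(string(sum))) // 32 raw, mostly unprintable bytes
	fmt.Printf("%x\n", sum)       // 64 hex characters, safe to log and compare
}
```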

View File

@ -20,7 +20,7 @@ import (
"os"
"strings"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/sirupsen/logrus"
)

View File

@ -134,7 +134,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
Callback: func(path string, ent *godirwalk.Dirent) error {
if util.IsInWhitelist(path) {
if util.IsDestDir(path) {
logrus.Debugf("Skipping paths under %s, as it is a whitelisted directory", path)
logrus.Tracef("Skipping paths under %s, as it is a whitelisted directory", path)
return filepath.SkipDir
}
return nil
@ -170,7 +170,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
filesToAdd := []string{}
for path := range memFs {
if util.CheckWhitelist(path) {
logrus.Debugf("Not adding %s to layer, as it's whitelisted", path)
logrus.Tracef("Not adding %s to layer, as it's whitelisted", path)
continue
}
// Only add changed files.
@ -179,7 +179,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
return nil, nil, fmt.Errorf("could not check if file has changed %s %s", path, err)
}
if fileChanged {
logrus.Debugf("Adding %s to layer, because it was changed.", path)
logrus.Tracef("Adding %s to layer, because it was changed.", path)
filesToAdd = append(filesToAdd, path)
}
}

View File

@ -69,9 +69,17 @@ var volumes = []string{}
var excluded []string
type ExtractFunction func(string, *tar.Header, io.Reader) error
// GetFSFromImage extracts the layers of img to root
// It returns a list of all files extracted
func GetFSFromImage(root string, img v1.Image) ([]string, error) {
func GetFSFromImage(root string, img v1.Image, extract ExtractFunction) ([]string, error) {
if extract == nil {
return nil, errors.New("must supply an extract function")
}
if img == nil {
return nil, errors.New("image cannot be nil")
}
if err := DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return nil, err
}
@ -114,7 +122,7 @@ func GetFSFromImage(root string, img v1.Image) ([]string, error) {
}
continue
}
if err := extractFile(root, hdr, tr); err != nil {
if err := extract(root, hdr, tr); err != nil {
return nil, err
}
extractedFiles = append(extractedFiles, filepath.Join(root, filepath.Clean(hdr.Name)))
@ -179,7 +187,7 @@ func unTar(r io.Reader, dest string) ([]string, error) {
if err != nil {
return nil, err
}
if err := extractFile(dest, hdr, tr); err != nil {
if err := ExtractFile(dest, hdr, tr); err != nil {
return nil, err
}
extractedFiles = append(extractedFiles, dest)
@ -187,7 +195,7 @@ func unTar(r io.Reader, dest string) ([]string, error) {
return extractedFiles, nil
}
func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
path := filepath.Join(dest, filepath.Clean(hdr.Name))
base := filepath.Base(path)
dir := filepath.Dir(path)
@ -206,7 +214,7 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
}
switch hdr.Typeflag {
case tar.TypeReg:
logrus.Debugf("creating file %s", path)
logrus.Tracef("creating file %s", path)
// It's possible a file is in the tar before its directory,
// or a file was copied over a directory prior to now
fi, err := os.Stat(dir)
@ -502,7 +510,7 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) {
}
destPath := filepath.Join(dest, file)
if fi.IsDir() {
logrus.Debugf("Creating directory %s", destPath)
logrus.Tracef("Creating directory %s", destPath)
mode := fi.Mode()
uid := int(fi.Sys().(*syscall.Stat_t).Uid)

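Making the extract step a parameter of `GetFSFromImage` lets production callers pass `util.ExtractFile` while tests observe extraction without touching disk. A minimal sketch of the kind of counting stub the unit tests above install via `extractFn`:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"strings"
)

// ExtractFunction mirrors the signature introduced above.
type ExtractFunction func(dest string, hdr *tar.Header, tr io.Reader) error

func main() {
	count := 0
	var stub ExtractFunction = func(_ string, _ *tar.Header, _ io.Reader) error {
		count++ // record the call instead of writing to disk
		return nil
	}
	_ = stub("/root", &tar.Header{Name: "foo.txt"}, strings.NewReader(""))
	fmt.Println("files extracted:", count) // files extracted: 1
}
```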
View File

@ -659,7 +659,7 @@ func TestExtractFile(t *testing.T) {
defer os.RemoveAll(r)
for _, hdr := range tc.hdrs {
if err := extractFile(r, hdr, bytes.NewReader(tc.contents)); err != nil {
if err := ExtractFile(r, hdr, bytes.NewReader(tc.contents)); err != nil {
t.Fatal(err)
}
}

View File

@ -22,7 +22,7 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"

View File

@ -26,7 +26,7 @@ import (
"sync"
"syscall"
highwayhash "github.com/minio/HighwayHash"
"github.com/minio/highwayhash"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

View File

@ -32,7 +32,6 @@ scripts=(
"hack/boilerplate.sh"
"hack/gofmt.sh"
"hack/linter.sh"
"hack/dep.sh"
)
fail=0
for s in "${scripts[@]}"

View File

@ -0,0 +1,6 @@
#!/bin/bash
today=$(date +%Y%m%d)
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'"$today"'"/' "$GOFILE"
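
The sed rule rewrites a `const Repo = "YYYYMMDD"` declaration in `$GOFILE`, which suggests the script is meant to run under `go:generate` (where `$GOFILE` names the file containing the directive). A hedged sketch of that wiring; the script's actual path and package are assumptions, not shown in this diff:

```go
package version

// Hypothetical wiring: the real script path is not shown in this diff.
//go:generate ./hack/update_repo_date.sh
const Repo = "20191220" // rewritten in place to today's date by the sed rule above
```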

46908
vendor/cloud.google.com/go/storage/storage.replay generated vendored Normal file

File diff suppressed because one or more lines are too long

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go generated vendored Executable file → Normal file
View File

0
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go generated vendored Executable file → Normal file
View File

File diff suppressed because it is too large

12
vendor/github.com/Azure/go-ansiterm/README.md generated vendored Normal file
View File

@ -0,0 +1,12 @@
# go-ansiterm
This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform dependent.
For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
See parser_test.go for examples exercising the state machine and generating appropriate function calls.
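A minimal sketch of the parser/handler split described above, using a hypothetical trimmed-down handler interface (the library's real event handler interface is much larger):

```go
package main

import "fmt"

// cursorHandler is a hypothetical stand-in for the library's event handler:
// the parser turns the byte stream ESC [ A into a CUU() call on it.
type cursorHandler interface {
	CUU() error // Cursor Up
}

type printingHandler struct{}

func (printingHandler) CUU() error {
	fmt.Println("move cursor up one row (platform-specific work goes here)")
	return nil
}

func main() {
	var h cursorHandler = printingHandler{}
	_ = h.CUU() // a real parser would dispatch this after consuming 0x1b '[' 'A'
}
```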
-----
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

View File

@ -0,0 +1,292 @@
# Azure Active Directory authentication for Go
This is a standalone package for authenticating with Azure Active
Directory from other Go libraries and applications, in particular the [Azure SDK
for Go](https://github.com/Azure/azure-sdk-for-go).
Note: Despite the package's name it is not related to other "ADAL" libraries
maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
trackers.
## Install
```bash
go get -u github.com/Azure/go-autorest/autorest/adal
```
## Usage
An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
### Register an Azure AD Application with secret
1. Register a new application with a `secret` credential
```
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--password secret
```
2. Create a service principal using the `Application ID` from previous step
```
az ad sp create --id "Application ID"
```
* Replace `Application ID` with `appId` from step 1.
### Register an Azure AD Application with certificate
1. Create a private key
```
openssl genrsa -out "example-app.key" 2048
```
2. Create the certificate
```
openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
```
3. Create the PKCS12 version of the certificate containing also the private key
```
openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
```
4. Register a new application with the certificate content from `example-app.crt`
```
certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
az ad app create \
--display-name example-app \
--homepage https://example-app/home \
--identifier-uris https://example-app/app \
--key-usage Verify --end-date 2018-01-01 \
--key-value "${certificateContents}"
```
5. Create a service principal using the `Application ID` from previous step
```
az ad sp create --id "APPLICATION_ID"
```
* Replace `APPLICATION_ID` with `appId` from step 4.
### Grant the necessary permissions
Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
which can be assigned to a service principal of an Azure AD application depending on your needs.
```
az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
```
* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from previous step.
* Replace the `ROLE_NAME` with a role name of your choice.
It is also possible to define custom role definitions.
```
az role definition create --role-definition role-definition.json
```
* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of `role-definition.json` file.
### Acquire Access Token
The common configuration used by all flows:
```Go
const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
tenantID := "TENANT_ID"
oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
applicationID := "APPLICATION_ID"
callback := func(token adal.Token) error {
// This is called after the token is acquired
}
// The resource for which the token is acquired
resource := "https://management.core.windows.net/"
```
* Replace the `TENANT_ID` with your tenant ID.
* Replace the `APPLICATION_ID` with the value from previous section.
#### Client Credentials
```Go
applicationSecret := "APPLICATION_SECRET"
spt, err := adal.NewServicePrincipalToken(
oauthConfig,
applicationID,
applicationSecret,
resource,
callbacks...)
if err != nil {
return nil, err
}
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Replace the `APPLICATION_SECRET` with the `password` value from previous section.
#### Client Certificate
```Go
certificatePath := "./example-app.pfx"
certData, err := ioutil.ReadFile(certificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
}
// Get the certificate and private key from pfx file
certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
spt, err := adal.NewServicePrincipalTokenFromCertificate(
oauthConfig,
applicationID,
certificate,
rsaPrivateKey,
resource,
callbacks...)
// Acquire a new access token
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
* Update the certificate path to point to the example-app.pfx file which was created in previous section.
#### Device Code
```Go
oauthClient := &http.Client{}
// Acquire the device code
deviceCode, err := adal.InitiateDeviceAuth(
oauthClient,
oauthConfig,
applicationID,
resource)
if err != nil {
return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
}
// Display the authentication message
fmt.Println(*deviceCode.Message)
// Wait here until the user is authenticated
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
}
spt, err := adal.NewServicePrincipalTokenFromManualToken(
oauthConfig,
applicationID,
resource,
*token,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Username password authenticate
```Go
spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
oauthConfig,
applicationID,
username,
password,
resource,
callbacks...)
if err == nil {
token := spt.Token
}
```
#### Authorization code authenticate
``` Go
spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
oauthConfig,
applicationID,
clientSecret,
authorizationCode,
redirectURI,
resource,
callbacks...)
err = spt.Refresh()
if err == nil {
token := spt.Token
}
```
### Command Line Tool
A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
```
adal -h
Usage of ./adal:
-applicationId string
application id
-certificatePath string
path to pkcs12/PFX application certificate
-mode string
authentication mode (device, secret, cert, refresh) (default "device")
-resource string
resource for which the token is requested
-secret string
application secret
-tenantId string
tenant id
-tokenCachePath string
location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
```
Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
```
adal -mode device \
-applicationId "APPLICATION_ID" \
-tenantId "TENANT_ID" \
-resource https://management.core.windows.net/
```

1
vendor/github.com/Microsoft/go-winio/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
*.exe

22
vendor/github.com/Microsoft/go-winio/README.md generated vendored Normal file
View File

@ -0,0 +1,22 @@
# go-winio
This repository contains utilities for efficiently performing Win32 IO operations in
Go. Currently, this is focused on accessing named pipes and other file handles, and
on using named pipes as a net transport.
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
newer operating systems. This is similar to the implementation of network sockets in Go's net
package.
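As a hedged illustration of the named-pipe transport, the sketch below assumes the package's `DialPipe` helper, which returns an ordinary `net.Conn` (Windows-only):

```go
// +build windows

package main

import (
	"fmt"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	timeout := 2 * time.Second
	conn, err := winio.DialPipe(`\\.\pipe\example`, &timeout) // named pipe as a net.Conn
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Fprintln(conn, "hello over a named pipe")
}
```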
Please see the LICENSE file for licensing information.
This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.

View File

@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5
vendor/github.com/Nvveen/Gotty/README generated vendored Normal file
View File
@ -0,0 +1,5 @@
Gotty is a library written in Go that determines and reads termcap database
files to produce an interface for interacting with the capabilities of a
terminal.
See the godoc documentation or the source code for more information about
function usage.
3
vendor/github.com/Nvveen/Gotty/TODO generated vendored Normal file
View File
@ -0,0 +1,3 @@
gotty.go:// TODO add more concurrency to name lookup, look for more opportunities.
all:// TODO add more documentation, with function usage in a doc.go file.
all:// TODO add more testing/benchmarking with go test.
View File
@ -1,3 +1,3 @@
AWS SDK for Go
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
View File
@ -138,8 +138,27 @@ type RequestFailure interface {
RequestID() string
}
// NewRequestFailure returns a new request error wrapper for the given Error
// provided.
// NewRequestFailure returns a wrapped error with additional information for
// the request status code, and the service request ID.
//
// Should be used to wrap all requests that involve service requests, even if
// the request failed without a service response but had an HTTP status code
// that may be meaningful.
func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
return newRequestError(err, statusCode, reqID)
}
// UnmarshalError provides the interface for the SDK failing to unmarshal data.
type UnmarshalError interface {
awsError
Bytes() []byte
}
// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
// the bytes that fail to unmarshal to the error.
func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
return &unmarshalError{
awsError: New("UnmarshalError", msg, err),
bytes: bytes,
}
}
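To illustrate the two constructors above, a small self-contained sketch; the error code, status, and payload values here are illustrative, not from the SDK:
```Go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a base error with the HTTP status code and request ID.
	base := awserr.New("ThrottlingException", "rate exceeded", nil)
	reqErr := awserr.NewRequestFailure(base, 400, "req-1234")
	fmt.Println(reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())

	// Attach the payload that failed to decode; Bytes() recovers it later.
	uerr := awserr.NewUnmarshalError(nil, "failed to decode response", []byte(`{"oops"`))
	fmt.Println(uerr.Code(), string(uerr.Bytes()))
}
```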
View File
@ -1,6 +1,9 @@
package awserr
import "fmt"
import (
"encoding/hex"
"fmt"
)
// SprintError returns a string of the formatted error code.
//
@ -119,6 +122,7 @@ type requestError struct {
awsError
statusCode int
requestID string
bytes []byte
}
// newRequestError returns a wrapped error with additional information for
@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error {
return []error{r.OrigErr()}
}
type unmarshalError struct {
awsError
bytes []byte
}
// Error returns the string representation of the error.
// Satisfies the error interface.
func (e unmarshalError) Error() string {
extra := hex.Dump(e.bytes)
return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (e unmarshalError) String() string {
return e.Error()
}
// Bytes returns the bytes that failed to unmarshal.
func (e unmarshalError) Bytes() []byte {
return e.bytes
}
// An error list that satisfies the golang interface
type errorList []error
@ -181,7 +208,7 @@ func (e errorList) Error() string {
// How do we want to handle the array size being zero
if size := len(e); size > 0 {
for i := 0; i < size; i++ {
msg += fmt.Sprintf("%s", e[i].Error())
msg += e[i].Error()
// We check the next index to see if it is within the slice.
// If it is, then we append a newline. We do this, because unit tests
// could be broken with the additional '\n'
View File
@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool {
rb := reflect.Indirect(reflect.ValueOf(b))
if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
// If the elements are both nil, and of the same type the are equal
// If the elements are both nil, and of the same type they are equal
// If they are of different types they are not equal
return reflect.TypeOf(a) == reflect.TypeOf(b)
} else if raValid != rbValid {
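A short illustrative sketch of how the dereferencing behavior plays out for callers:
```Go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	// Pointers are dereferenced before comparison, so two distinct
	// *string values with equal contents compare as equal.
	fmt.Println(awsutil.DeepEqual(aws.String("value"), aws.String("value"))) // true

	// Two nil pointers of the same type are equal; differing types are not.
	var a, b *string
	fmt.Println(awsutil.DeepEqual(a, b)) // true
}
```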
View File
@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
// SetValueAtPath sets a value at the case insensitive lexical path inside
// of a structure.
func SetValueAtPath(i interface{}, path string, v interface{}) {
if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
rvals := rValuesAtPath(i, path, true, false, v == nil)
for _, rval := range rvals {
if rval.Kind() == reflect.Ptr && rval.IsNil() {
continue
}
setValue(rval, v)
}
}
View File
@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
case reflect.Struct:
buf.WriteString("{\n")
names := []string{}
for i := 0; i < v.Type().NumField(); i++ {
name := v.Type().Field(i).Name
f := v.Field(i)
if name[0:1] == strings.ToLower(name[0:1]) {
ft := v.Type().Field(i)
fv := v.Field(i)
if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
continue // ignore unexported fields
}
if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
continue // ignore unset fields
}
names = append(names, name)
}
for i, n := range names {
val := v.FieldByName(n)
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
stringValue(val, indent+2, buf)
buf.WriteString(ft.Name + ": ")
if i < len(names)-1 {
buf.WriteString(",\n")
if tag := ft.Tag.Get("sensitive"); tag == "true" {
buf.WriteString("<sensitive>")
} else {
stringValue(fv, indent+2, buf)
}
buf.WriteString(",\n")
}
buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
View File
@ -18,7 +18,7 @@ type Config struct {
// States that the signing name did not come from a modeled source but
// was derived based on other data. Used by service client constructors
// to determine if the signin name can be overriden based on metadata the
// to determine if the signin name can be overridden based on metadata the
// service has.
SigningNameDerived bool
}
@ -64,7 +64,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
default:
maxRetries := aws.IntValue(cfg.MaxRetries)
if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
maxRetries = 3
maxRetries = DefaultRetryerMaxNumRetries
}
svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
}
View File
@ -1,6 +1,7 @@
package client
import (
"math"
"strconv"
"time"
@ -9,82 +10,142 @@ import (
)
// DefaultRetryer implements basic retry logic using exponential backoff for
// most services. If you want to implement custom retry logic, implement the
// request.Retryer interface or create a structure type that composes this
// struct and override the specific methods. For example, to override only
// the MaxRetries method:
// most services. If you want to implement custom retry logic, you can implement the
// request.Retryer interface.
//
// type retryer struct {
// client.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
// func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
// NumMaxRetries is the maximum number of retries that will be performed.
// By default, this is zero.
NumMaxRetries int
// MinRetryDelay is the minimum retry delay after which retry will be performed.
// If not set, the value is 0ns.
MinRetryDelay time.Duration
// MinThrottleRetryDelay is the minimum retry delay when throttled.
// If not set, the value is 0ns.
MinThrottleDelay time.Duration
// MaxRetryDelay is the maximum retry delay before which retry must be performed.
// If not set, the value is 0ns.
MaxRetryDelay time.Duration
// MaxThrottleDelay is the maximum retry delay when throttled.
// If not set, the value is 0ns.
MaxThrottleDelay time.Duration
}
const (
// DefaultRetryerMaxNumRetries sets maximum number of retries
DefaultRetryerMaxNumRetries = 3
// DefaultRetryerMinRetryDelay sets minimum retry delay
DefaultRetryerMinRetryDelay = 30 * time.Millisecond
// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
// DefaultRetryerMaxRetryDelay sets maximum retry delay
DefaultRetryerMaxRetryDelay = 300 * time.Second
// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
DefaultRetryerMaxThrottleDelay = 300 * time.Second
)
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request.
func (d DefaultRetryer) MaxRetries() int {
return d.NumMaxRetries
}
// setRetryerDefaults sets the default values of the retryer if not set
func (d *DefaultRetryer) setRetryerDefaults() {
if d.MinRetryDelay == 0 {
d.MinRetryDelay = DefaultRetryerMinRetryDelay
}
if d.MaxRetryDelay == 0 {
d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
}
if d.MinThrottleDelay == 0 {
d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
}
if d.MaxThrottleDelay == 0 {
d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
}
}
// RetryRules returns the delay duration before retrying this request again
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// Set the upper limit of delay in retrying at ~five minutes
minTime := 30
throttle := d.shouldThrottle(r)
if throttle {
if delay, ok := getRetryDelay(r); ok {
return delay
}
minTime = 500
// if number of max retries is zero, no retries will be performed.
if d.NumMaxRetries == 0 {
return 0
}
// Sets default value for retryer members
d.setRetryerDefaults()
// minDelay is the minimum retryer delay
minDelay := d.MinRetryDelay
var initialDelay time.Duration
isThrottle := r.IsErrorThrottle()
if isThrottle {
if delay, ok := getRetryAfterDelay(r); ok {
initialDelay = delay
}
minDelay = d.MinThrottleDelay
}
retryCount := r.RetryCount
if throttle && retryCount > 8 {
retryCount = 8
} else if retryCount > 13 {
retryCount = 13
// maxDelay the maximum retryer delay
maxDelay := d.MaxRetryDelay
if isThrottle {
maxDelay = d.MaxThrottleDelay
}
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
return time.Duration(delay) * time.Millisecond
var delay time.Duration
// Logic to cap the retry count based on the minDelay provided
actualRetryCount := int(math.Log2(float64(minDelay))) + 1
if actualRetryCount < 63-retryCount {
delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
if delay > maxDelay {
delay = getJitterDelay(maxDelay / 2)
}
} else {
delay = getJitterDelay(maxDelay / 2)
}
return delay + initialDelay
}
// getJitterDelay returns a jittered delay for retry
func getJitterDelay(duration time.Duration) time.Duration {
return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
}
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
// ShouldRetry returns false if number of max retries is 0.
if d.NumMaxRetries == 0 {
return false
}
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable != nil {
return *r.Retryable
}
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
return true
}
return r.IsErrorRetryable() || d.shouldThrottle(r)
}
// ShouldThrottle returns true if the request should be throttled.
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
switch r.HTTPResponse.StatusCode {
case 429:
case 502:
case 503:
case 504:
default:
return r.IsErrorThrottle()
}
return true
return r.IsErrorRetryable() || r.IsErrorThrottle()
}
// This will look in the Retry-After header, RFC 7231, for how long
// it will wait before attempting another request
func getRetryDelay(r *request.Request) (time.Duration, bool) {
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
if !canUseRetryAfterHeader(r) {
return 0, false
}
View File
@ -67,10 +67,14 @@ func logRequest(r *request.Request) {
if !bodySeekable {
r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
}
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.ResetBody()
// Reset the request body because dumpRequest will re-wrap the
// r.HTTPRequest's Body as a NoOpCloser and will not be reset after
// read by the HTTP client reader.
if err := r.Error; err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, err))
return
}
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
@ -118,6 +122,12 @@ var LogHTTPResponseHandler = request.NamedHandler{
func logResponse(r *request.Request) {
lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
if r.HTTPResponse == nil {
lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
return
}
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
if logBody {
r.HTTPResponse.Body = &teeReaderCloser{
View File
@ -0,0 +1,28 @@
package client
import (
"time"
"github.com/aws/aws-sdk-go/aws/request"
)
// NoOpRetryer provides a retryer that performs no retries.
// It should be used when we do not want retries to be performed.
type NoOpRetryer struct{}
// MaxRetries returns the maximum number of retries the service will use to make
// an individual API request; for NoOpRetryer it is always zero.
func (d NoOpRetryer) MaxRetries() int {
return 0
}
// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
return false
}
// RetryRules returns the delay duration before retrying this request again;
// since NoOpRetryer does not retry, RetryRules always returns 0.
func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
return 0
}
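An illustrative sketch, not from the SDK, of disabling retries for a session with this type:
```Go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Every request made through this session is attempted exactly once.
	sess := session.Must(session.NewSession(
		request.WithRetryer(aws.NewConfig(), client.NoOpRetryer{})))
	_ = sess
}
```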
View File
@ -18,9 +18,9 @@ const UseServiceDefaultRetries = -1
type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig tructure.
// all clients will use the defaults.DefaultConfig structure.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
@ -45,8 +45,8 @@ type Config struct {
// that overrides the default generated endpoint for a client. Set this
// to `""` to use the default generated endpoint.
//
// @note You must still provide a `Region` value when specifying an
// endpoint for a client.
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
Endpoint *string
// The resolver to use for looking up endpoints for AWS service clients
@ -65,8 +65,8 @@ type Config struct {
// noted. A full list of regions is found in the "Regions and Endpoints"
// document.
//
// @see http://docs.aws.amazon.com/general/latest/gr/rande.html
// AWS Regions and Endpoints
// See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
// Regions and Endpoints.
Region *string
// Set this to `true` to disable SSL when sending requests. Defaults
@ -120,9 +120,10 @@ type Config struct {
// will use virtual hosted bucket addressing when possible
// (`http://BUCKET.s3.amazonaws.com/KEY`).
//
// @note This configuration option is specific to the Amazon S3 service.
// @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// Amazon S3: Virtual Hosting of Buckets
// Note: This configuration option is specific to the Amazon S3 service.
//
// See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
// for Amazon S3: Virtual Hosting of Buckets
S3ForcePathStyle *bool
// Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
@ -223,12 +224,37 @@ type Config struct {
// Key: aws.String("//foo//bar//moo"),
// })
DisableRestProtocolURICleaning *bool
// EnableEndpointDiscovery will allow for endpoint discovery on operations that
// have the definition in its model. By default, endpoint discovery is off.
//
// Example:
// sess := session.Must(session.NewSession(&aws.Config{
// EnableEndpointDiscovery: aws.Bool(true),
// }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("/foo/bar/moo"),
// })
EnableEndpointDiscovery *bool
// DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
// request endpoint hosts with modeled information.
//
// Disabling this feature is useful when you want to use local endpoints
// for testing that do not support the modeled host prefix pattern.
DisableEndpointHostPrefix *bool
// STSRegionalEndpoint will enable regional or legacy endpoint resolving
STSRegionalEndpoint endpoints.STSRegionalEndpoint
}
// NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers.
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // Create Session with MaxRetries configuration to be shared by multiple
// // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
@ -377,6 +403,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
return c
}
// WithEndpointDiscovery will set whether or not to use endpoint discovery.
func (c *Config) WithEndpointDiscovery(t bool) *Config {
c.EnableEndpointDiscovery = &t
return c
}
// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
// when making requests.
func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
c.DisableEndpointHostPrefix = &t
return c
}
// MergeIn merges the passed in configs into the existing config object.
func (c *Config) MergeIn(cfgs ...*Config) {
for _, other := range cfgs {
@ -384,6 +423,13 @@ func (c *Config) MergeIn(cfgs ...*Config) {
}
}
// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
// when resolving the endpoint for a service
func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
c.STSRegionalEndpoint = sre
return c
}
func mergeInConfig(dst *Config, other *Config) {
if other == nil {
return
@ -476,6 +522,18 @@ func mergeInConfig(dst *Config, other *Config) {
if other.EnforceShouldRetryCheck != nil {
dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
}
if other.EnableEndpointDiscovery != nil {
dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
}
if other.DisableEndpointHostPrefix != nil {
dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
}
if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
dst.STSRegionalEndpoint = other.STSRegionalEndpoint
}
}
// Copy will return a shallow copy of the Config object. If any additional
View File
@ -1,8 +1,8 @@
// +build !go1.9
package aws
import (
"time"
)
import "time"
// Context is a copy of the Go v1.7 stdlib's context.Context interface.
// It is represented as an SDK interface to enable you to use the "WithContext"
@ -35,37 +35,3 @@ type Context interface {
// functions.
Value(key interface{}) interface{}
}
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}
// SleepWithContext will wait for the timer duration to expire, or the context
// is canceled. Which ever happens first. If the context is canceled the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}
View File
@ -1,9 +0,0 @@
// +build go1.7
package aws
import "context"
var (
backgroundCtx = context.Background()
)
11
vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go generated vendored Normal file
View File
@ -0,0 +1,11 @@
// +build go1.9
package aws
import "context"
// Context is an alias of the Go stdlib's context.Context interface.
// It can be used within the SDK's API operation "WithContext" methods.
//
// See https://golang.org/pkg/context on how to use contexts.
type Context = context.Context
View File
@ -39,3 +39,18 @@ func (e *emptyCtx) String() string {
var (
backgroundCtx = new(emptyCtx)
)
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return backgroundCtx
}
View File
@ -0,0 +1,20 @@
// +build go1.7
package aws
import "context"
// BackgroundContext returns a context that will never be canceled, has no
// values, and no deadline. This context is used by the SDK to provide
// backwards compatibility with non-context API operations and functionality.
//
// Go 1.6 and before:
// This context function is equivalent to context.Background in the Go stdlib.
//
// Go 1.7 and later:
// The context returned will be the value returned by context.Background()
//
// See https://golang.org/pkg/context for more information on Contexts.
func BackgroundContext() Context {
return context.Background()
}
24
vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go generated vendored Normal file
View File
@ -0,0 +1,24 @@
package aws
import (
"time"
)
// SleepWithContext will wait for the timer duration to expire, or until the context
// is canceled, whichever happens first. If the context is canceled, the Context's
// error will be returned.
//
// Expects Context to always return a non-nil error if the Done channel is closed.
func SleepWithContext(ctx Context, dur time.Duration) error {
t := time.NewTimer(dur)
defer t.Stop()
select {
case <-t.C:
break
case <-ctx.Done():
return ctx.Err()
}
return nil
}
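An illustrative sketch of the cancellation behavior, assuming Go 1.9+ where aws.Context aliases context.Context:
```Go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short by the 100ms deadline, and the
	// context's error is returned instead of nil.
	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err)
	}
}
```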
View File
@ -179,6 +179,242 @@ func IntValueMap(src map[string]*int) map[string]int {
return dst
}
// Uint returns a pointer to the uint value passed in.
func Uint(v uint) *uint {
return &v
}
// UintValue returns the value of the uint pointer passed in or
// 0 if the pointer is nil.
func UintValue(v *uint) uint {
if v != nil {
return *v
}
return 0
}
// UintSlice converts a slice of uint values into a slice of
// uint pointers
func UintSlice(src []uint) []*uint {
dst := make([]*uint, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// UintValueSlice converts a slice of uint pointers into a slice of
// uint values
func UintValueSlice(src []*uint) []uint {
dst := make([]uint, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// UintMap converts a string map of uint values into a string
// map of uint pointers
func UintMap(src map[string]uint) map[string]*uint {
dst := make(map[string]*uint)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// UintValueMap converts a string map of uint pointers into a string
// map of uint values
func UintValueMap(src map[string]*uint) map[string]uint {
dst := make(map[string]uint)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
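The uint helpers above, and the sibling integer and float families that follow, all share the same shape; a brief illustrative sketch:
```Go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Round-trip a scalar through the pointer helpers.
	p := aws.Uint(42)
	fmt.Println(aws.UintValue(p)) // 42

	// A nil pointer safely yields the zero value.
	var nilPtr *uint
	fmt.Println(aws.UintValue(nilPtr)) // 0

	// Slice and map helpers convert element-wise.
	fmt.Println(aws.UintValueSlice(aws.UintSlice([]uint{1, 2, 3}))) // [1 2 3]
}
```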
// Int8 returns a pointer to the int8 value passed in.
func Int8(v int8) *int8 {
return &v
}
// Int8Value returns the value of the int8 pointer passed in or
// 0 if the pointer is nil.
func Int8Value(v *int8) int8 {
if v != nil {
return *v
}
return 0
}
// Int8Slice converts a slice of int8 values into a slice of
// int8 pointers
func Int8Slice(src []int8) []*int8 {
dst := make([]*int8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int8ValueSlice converts a slice of int8 pointers into a slice of
// int8 values
func Int8ValueSlice(src []*int8) []int8 {
dst := make([]int8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int8Map converts a string map of int8 values into a string
// map of int8 pointers
func Int8Map(src map[string]int8) map[string]*int8 {
dst := make(map[string]*int8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int8ValueMap converts a string map of int8 pointers into a string
// map of int8 values
func Int8ValueMap(src map[string]*int8) map[string]int8 {
dst := make(map[string]int8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int16 returns a pointer to the int16 value passed in.
func Int16(v int16) *int16 {
return &v
}
// Int16Value returns the value of the int16 pointer passed in or
// 0 if the pointer is nil.
func Int16Value(v *int16) int16 {
if v != nil {
return *v
}
return 0
}
// Int16Slice converts a slice of int16 values into a slice of
// int16 pointers
func Int16Slice(src []int16) []*int16 {
dst := make([]*int16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int16ValueSlice converts a slice of int16 pointers into a slice of
// int16 values
func Int16ValueSlice(src []*int16) []int16 {
dst := make([]int16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int16Map converts a string map of int16 values into a string
// map of int16 pointers
func Int16Map(src map[string]int16) map[string]*int16 {
dst := make(map[string]*int16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int16ValueMap converts a string map of int16 pointers into a string
// map of int16 values
func Int16ValueMap(src map[string]*int16) map[string]int16 {
dst := make(map[string]int16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int32 returns a pointer to the int32 value passed in.
func Int32(v int32) *int32 {
return &v
}
// Int32Value returns the value of the int32 pointer passed in or
// 0 if the pointer is nil.
func Int32Value(v *int32) int32 {
if v != nil {
return *v
}
return 0
}
// Int32Slice converts a slice of int32 values into a slice of
// int32 pointers
func Int32Slice(src []int32) []*int32 {
dst := make([]*int32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Int32ValueSlice converts a slice of int32 pointers into a slice of
// int32 values
func Int32ValueSlice(src []*int32) []int32 {
dst := make([]int32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Int32Map converts a string map of int32 values into a string
// map of int32 pointers
func Int32Map(src map[string]int32) map[string]*int32 {
dst := make(map[string]*int32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Int32ValueMap converts a string map of int32 pointers into a string
// map of int32 values
func Int32ValueMap(src map[string]*int32) map[string]int32 {
dst := make(map[string]int32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 {
return &v
@ -238,6 +474,301 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 {
return dst
}
// Uint8 returns a pointer to the uint8 value passed in.
func Uint8(v uint8) *uint8 {
return &v
}
// Uint8Value returns the value of the uint8 pointer passed in or
// 0 if the pointer is nil.
func Uint8Value(v *uint8) uint8 {
if v != nil {
return *v
}
return 0
}
// Uint8Slice converts a slice of uint8 values into a slice of
// uint8 pointers
func Uint8Slice(src []uint8) []*uint8 {
dst := make([]*uint8, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
// uint8 values
func Uint8ValueSlice(src []*uint8) []uint8 {
dst := make([]uint8, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint8Map converts a string map of uint8 values into a string
// map of uint8 pointers
func Uint8Map(src map[string]uint8) map[string]*uint8 {
dst := make(map[string]*uint8)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint8ValueMap converts a string map of uint8 pointers into a string
// map of uint8 values
func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
dst := make(map[string]uint8)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint16 returns a pointer to the uint16 value passed in.
func Uint16(v uint16) *uint16 {
return &v
}
// Uint16Value returns the value of the uint16 pointer passed in or
// 0 if the pointer is nil.
func Uint16Value(v *uint16) uint16 {
if v != nil {
return *v
}
return 0
}
// Uint16Slice converts a slice of uint16 values into a slice of
// uint16 pointers
func Uint16Slice(src []uint16) []*uint16 {
dst := make([]*uint16, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
// uint16 values
func Uint16ValueSlice(src []*uint16) []uint16 {
dst := make([]uint16, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint16Map converts a string map of uint16 values into a string
// map of uint16 pointers
func Uint16Map(src map[string]uint16) map[string]*uint16 {
dst := make(map[string]*uint16)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint16ValueMap converts a string map of uint16 pointers into a string
// map of uint16 values
func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
dst := make(map[string]uint16)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint32 returns a pointer to the uint32 value passed in.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint32Value returns the value of the uint32 pointer passed in or
// 0 if the pointer is nil.
func Uint32Value(v *uint32) uint32 {
if v != nil {
return *v
}
return 0
}
// Uint32Slice converts a slice of uint32 values into a slice of
// uint32 pointers
func Uint32Slice(src []uint32) []*uint32 {
dst := make([]*uint32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
// uint32 values
func Uint32ValueSlice(src []*uint32) []uint32 {
dst := make([]uint32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint32Map converts a string map of uint32 values into a string
// map of uint32 pointers
func Uint32Map(src map[string]uint32) map[string]*uint32 {
dst := make(map[string]*uint32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint32ValueMap converts a string map of uint32 pointers into a string
// map of uint32 values
func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
dst := make(map[string]uint32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Uint64 returns a pointer to the uint64 value passed in.
func Uint64(v uint64) *uint64 {
return &v
}
// Uint64Value returns the value of the uint64 pointer passed in or
// 0 if the pointer is nil.
func Uint64Value(v *uint64) uint64 {
if v != nil {
return *v
}
return 0
}
// Uint64Slice converts a slice of uint64 values into a slice of
// uint64 pointers
func Uint64Slice(src []uint64) []*uint64 {
dst := make([]*uint64, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
// uint64 values
func Uint64ValueSlice(src []*uint64) []uint64 {
dst := make([]uint64, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Uint64Map converts a string map of uint64 values into a string
// map of uint64 pointers
func Uint64Map(src map[string]uint64) map[string]*uint64 {
dst := make(map[string]*uint64)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Uint64ValueMap converts a string map of uint64 pointers into a string
// map of uint64 values
func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
dst := make(map[string]uint64)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float32 returns a pointer to the float32 value passed in.
func Float32(v float32) *float32 {
return &v
}
// Float32Value returns the value of the float32 pointer passed in or
// 0 if the pointer is nil.
func Float32Value(v *float32) float32 {
if v != nil {
return *v
}
return 0
}
// Float32Slice converts a slice of float32 values into a slice of
// float32 pointers
func Float32Slice(src []float32) []*float32 {
dst := make([]*float32, len(src))
for i := 0; i < len(src); i++ {
dst[i] = &(src[i])
}
return dst
}
// Float32ValueSlice converts a slice of float32 pointers into a slice of
// float32 values
func Float32ValueSlice(src []*float32) []float32 {
dst := make([]float32, len(src))
for i := 0; i < len(src); i++ {
if src[i] != nil {
dst[i] = *(src[i])
}
}
return dst
}
// Float32Map converts a string map of float32 values into a string
// map of float32 pointers
func Float32Map(src map[string]float32) map[string]*float32 {
dst := make(map[string]*float32)
for k, val := range src {
v := val
dst[k] = &v
}
return dst
}
// Float32ValueMap converts a string map of float32 pointers into a string
// map of float32 values
func Float32ValueMap(src map[string]*float32) map[string]float32 {
dst := make(map[string]float32)
for k, val := range src {
if val != nil {
dst[k] = *val
}
}
return dst
}
// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 {
return &v
View File
@ -72,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{
signedTime = r.LastSignedAt
}
// 10 minutes to allow for some clock skew/delays in transmission.
// 5 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(10 * time.Minute).After(time.Now()) {
if signedTime.Add(5 * time.Minute).After(time.Now()) {
return
}
@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) {
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
// Catch all other request errors.
// Catch all request errors, and let the default retrier determine
// if the error is retryable.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
// Override the error with a context canceled error, if that was canceled.
ctx := r.Context()
@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH
// AfterRetryHandler performs final checks to determine if the request should
// be retried and how long to delay.
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
var AfterRetryHandler = request.NamedHandler{
Name: "core.AfterRetryHandler",
Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
r.RetryCount++
r.Error = nil
}
}}
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(r.RetryDelay)
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
r.Error = awserr.New(request.CanceledErrorCode,
"request context canceled", err)
r.Retryable = aws.Bool(false)
return
}
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
// get credentials will trigger a credentials refresh.
if r.IsErrorExpired() {
r.Config.Credentials.Expire()
}
r.RetryCount++
r.Error = nil
}
}}
// ValidateEndpointHandler is a request handler to validate a request had the
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
View File
@ -17,7 +17,7 @@ var SDKVersionUserAgentHandler = request.NamedHandler{
}
const execEnvVar = `AWS_EXECUTION_ENV`
const execEnvUAKey = `exec_env`
const execEnvUAKey = `exec-env`
// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
// execution environment to the user agent.
View File
@ -9,9 +9,7 @@ var (
// providers in the ChainProvider.
//
// This has been deprecated. For verbose error messaging set
// aws.Config.CredentialsChainVerboseErrors to true
//
// @readonly
// aws.Config.CredentialsChainVerboseErrors to true.
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
`no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
View File
@ -49,8 +49,11 @@
package credentials
import (
"fmt"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
)
// AnonymousCredentials is an empty Credential object that can be used as
@ -64,8 +67,6 @@ import (
// Credentials: credentials.AnonymousCredentials,
// })))
// // Access public S3 buckets.
//
// @readonly
var AnonymousCredentials = NewStaticCredentials("", "", "")
// A Value is the AWS credentials value for individual credential fields.
@ -83,6 +84,12 @@ type Value struct {
ProviderName string
}
// HasKeys returns if the credentials Value has both AccessKeyID and
// SecretAccessKey value set.
func (v Value) HasKeys() bool {
return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
}
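A brief illustrative sketch of the check:
```Go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	v := credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
	fmt.Println(v.HasKeys()) // true

	// An empty Value is missing both keys.
	fmt.Println(credentials.Value{}.HasKeys()) // false
}
```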
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what to
// be expired means.
@ -99,6 +106,14 @@ type Provider interface {
IsExpired() bool
}
// An Expirer is an interface that Providers can implement to expose the expiration
// time, if known. If the Provider cannot accurately provide this info,
// it should not implement this interface.
type Expirer interface {
// The time at which the credentials are no longer valid
ExpiresAt() time.Time
}
// An ErrorProvider is a stub credentials provider that always returns an error.
// It is used by the SDK when constructing a known provider is not possible
// due to an error.
@ -158,13 +173,19 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
curTime := e.CurrentTime
if curTime == nil {
curTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
return e.expiration.Before(curTime())
}
// A Credentials provides synchronous safe retrieval of AWS credentials Value.
// ExpiresAt returns the expiration time of the credential
func (e *Expiry) ExpiresAt() time.Time {
return e.expiration
}
// A Credentials provides concurrency safe retrieval of AWS credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
@ -256,3 +277,23 @@ func (c *Credentials) IsExpired() bool {
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
// ExpiresAt provides access to the functionality of the Expirer interface of
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
c.m.RLock()
defer c.m.RUnlock()
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
nil)
}
if c.forceRefresh {
// set expiration time to the distant past
return time.Time{}, nil
}
return expirer.ExpiresAt(), nil
}
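An illustrative sketch of the non-Expirer fallback, using the static provider, which has no expiry and therefore does not implement Expirer:
```Go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// ExpiresAt returns a ProviderNotExpirer error for static credentials.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
	if _, err := creds.ExpiresAt(); err != nil {
		fmt.Println(err)
	}
}
```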
View File
@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/sdkuri"
)
@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
}
if err := s.Err(); err != nil {
return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
return nil, awserr.New(request.ErrCodeSerialization,
"failed to read EC2 instance role from metadata service", err)
}
return credsList, nil
@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
respCreds := ec2RoleCredRespBody{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{},
awserr.New("SerializationError",
awserr.New(request.ErrCodeSerialization,
fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
err)
}
View File
@ -39,6 +39,7 @@ import (
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)
// ProviderName is the name of the credentials provider.
@ -65,6 +66,10 @@ type Provider struct {
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// AuthorizationToken is an optional value that, if set, will be used as the
// value of the Authorization header of the endpoint credential request.
AuthorizationToken string
}
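An illustrative sketch of setting the new field through the provider's functional options; the endpoint URL and token value are assumptions:
```Go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// The token is sent as the Authorization header of each credential request.
	creds := endpointcreds.NewCredentialsClient(
		*aws.NewConfig(),
		defaults.Handlers(),
		"http://127.0.0.1:8080/creds",
		func(p *endpointcreds.Provider) {
			p.AuthorizationToken = "Bearer example-token"
		},
	)
	_ = creds
}
```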
// NewProviderClient returns a credentials Provider for retrieving AWS credentials
@ -93,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin
return p
}
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
// from an arbitrary endpoint concurrently. The client will request the
// NewCredentialsClient returns a pointer to a new Credentials object
// wrapping the endpoint credentials Provider.
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
}
@ -152,6 +157,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
out := &getCredentialsOutput{}
req := p.Client.NewRequest(op, nil, out)
req.HTTPRequest.Header.Set("Accept", "application/json")
if authToken := p.AuthorizationToken; len(authToken) != 0 {
req.HTTPRequest.Header.Set("Authorization", authToken)
}
return out, req.Send()
}
@ -167,7 +175,7 @@ func unmarshalHandler(r *request.Request) {
out := r.Data.(*getCredentialsOutput)
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
r.Error = awserr.New("SerializationError",
r.Error = awserr.New(request.ErrCodeSerialization,
"failed to decode endpoint credentials",
err,
)
@ -178,11 +186,15 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
var errOut errorOutput
if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
r.Error = awserr.New("SerializationError",
"failed to decode endpoint credentials",
err,
err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
if err != nil {
r.Error = awserr.NewRequestFailure(
awserr.New(request.ErrCodeSerialization,
"failed to decode error message", err),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
// Response body format is not consistent between metadata endpoints.
View File
@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider"
var (
// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
// found in the process's environment.
//
// @readonly
ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
// can't be found in the process's environment.
//
// @readonly
ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
)
View File
@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret
View File
@ -0,0 +1,425 @@
/*
Package processcreds is a credential Provider to retrieve `credential_process`
credentials.
WARNING: The following describes a method of sourcing credentials from an external
process. This can potentially be dangerous, so proceed with caution. Other
credential providers should be preferred if at all possible. If using this
option, you should make sure that the config file is as locked down as possible
using security best practices for your operating system.
You can use credentials from a `credential_process` in a variety of ways.
One way is to setup your shared config file, located in the default
location, with the `credential_process` key and the command you want to be
called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
[default]
credential_process = /command/to/call
Creating a new session will use the credential process to retrieve credentials.
NOTE: If there are credentials in the profile you are using, the credential
process will not be used.
// Initialize a session to load credentials.
sess, _ := session.NewSession(&aws.Config{
Region: aws.String("us-east-1")},
)
// Create S3 service client to use the credentials.
svc := s3.New(sess)
Another way to use the `credential_process` method is by using
`credentials.NewCredentials()` and providing a command to be executed to
retrieve credentials:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentials("/path/to/command")
// Create service client value configured for credentials.
svc := s3.New(sess, &aws.Config{Credentials: creds})
You can set a non-default timeout for the `credential_process` with another
constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
set a one minute timeout:
// Create credentials using the ProcessProvider.
creds := processcreds.NewCredentialsTimeout(
"/path/to/command",
time.Duration(1) * time.Minute)
If you need more control, you can set any configurable options in the
credentials using one or more option functions. For example, you can set a two
minute timeout, a credential duration of 60 minutes, and a maximum stdout
buffer size of 2k.
creds := processcreds.NewCredentials(
"/path/to/command",
func(opt *ProcessProvider) {
opt.Timeout = time.Duration(2) * time.Minute
opt.Duration = time.Duration(60) * time.Minute
opt.MaxBufSize = 2048
})
You can also use your own `exec.Cmd`:
// Create an exec.Cmd
myCommand := exec.Command("/path/to/command")
// Create credentials using your exec.Cmd and custom timeout
creds := processcreds.NewCredentialsCommand(
myCommand,
func(opt *processcreds.ProcessProvider) {
opt.Timeout = time.Duration(1) * time.Second
})
*/
package processcreds
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
)
const (
// ProviderName is the name this credentials provider will label any
// returned credentials Value with.
ProviderName = `ProcessProvider`
// ErrCodeProcessProviderParse error parsing process output
ErrCodeProcessProviderParse = "ProcessProviderParseError"
// ErrCodeProcessProviderVersion version error in output
ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
// ErrCodeProcessProviderRequired required attribute missing in output
ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
// ErrCodeProcessProviderExecution execution of command failed
ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
// errMsgProcessProviderTimeout process took longer than allowed
errMsgProcessProviderTimeout = "credential process timed out"
// errMsgProcessProviderProcess process error
errMsgProcessProviderProcess = "error in credential_process"
// errMsgProcessProviderParse problem parsing output
errMsgProcessProviderParse = "parse failed of credential_process output"
// errMsgProcessProviderVersion version error in output
errMsgProcessProviderVersion = "wrong version in process output (not 1)"
// errMsgProcessProviderMissKey missing access key id in output
errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
// errMsgProcessProviderMissSecret missing secret access key in output
errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
// errMsgProcessProviderPrepareCmd prepare of command failed
errMsgProcessProviderPrepareCmd = "failed to prepare command"
// errMsgProcessProviderEmptyCmd command must not be empty
errMsgProcessProviderEmptyCmd = "command must not be empty"
// errMsgProcessProviderPipe failed to initialize pipe
errMsgProcessProviderPipe = "failed to initialize pipe"
// DefaultDuration is the default amount of time in minutes that the
// credentials will be valid for.
DefaultDuration = time.Duration(15) * time.Minute
// DefaultBufSize limits buffer size from growing to an enormous
// amount due to a faulty process.
DefaultBufSize = 1024
// DefaultTimeout default limit on time a process can run.
DefaultTimeout = time.Duration(1) * time.Minute
)
// ProcessProvider satisfies the credentials.Provider interface, and is a
// client to retrieve credentials from a process.
type ProcessProvider struct {
staticCreds bool
credentials.Expiry
originalCommand []string
// Expiry duration of the credentials. Defaults to 15 minutes if not set.
Duration time.Duration
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
// due to ExpiredTokenException exceptions.
//
// So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
// 10 seconds before the credentials are actually expired.
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// A string representing an os command that should return a JSON with
// credential information.
command *exec.Cmd
// MaxBufSize limits memory usage from growing to an enormous
// amount due to a faulty process.
MaxBufSize int
// Timeout limits the time a process can run.
Timeout time.Duration
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
// ProcessProvider. The credentials will expire every 15 minutes by default.
func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: exec.Command(command),
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
// NewCredentialsTimeout returns a pointer to a new Credentials object with
// the specified command and timeout, and default duration and max buffer size.
func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
p := NewCredentials(command, func(opt *ProcessProvider) {
opt.Timeout = timeout
})
return p
}
// NewCredentialsCommand returns a pointer to a new Credentials object with
// the specified command, and default timeout, duration and max buffer size.
func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
p := &ProcessProvider{
command: command,
Duration: DefaultDuration,
Timeout: DefaultTimeout,
MaxBufSize: DefaultBufSize,
}
for _, option := range options {
option(p)
}
return credentials.NewCredentials(p)
}
type credentialProcessResponse struct {
Version int
AccessKeyID string `json:"AccessKeyId"`
SecretAccessKey string
SessionToken string
Expiration *time.Time
}
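For reference, an illustrative payload matching this struct; the field names follow the struct's JSON tags, and Expiration, which is optional, uses RFC 3339 format:
```Go
package main

import "fmt"

// exampleOutput is an illustrative credential_process payload; the concrete
// key and token values are placeholders.
const exampleOutput = `{
  "Version": 1,
  "AccessKeyId": "AKID",
  "SecretAccessKey": "SECRET",
  "SessionToken": "TOKEN",
  "Expiration": "2019-12-22T11:10:15Z"
}`

func main() {
	fmt.Println(exampleOutput)
}
```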
// Retrieve executes the 'credential_process' and returns the credentials.
func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
out, err := p.executeCredentialProcess()
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
// Serialize and validate response
resp := &credentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderParse,
fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
err)
}
if resp.Version != 1 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderVersion,
errMsgProcessProviderVersion,
nil)
}
if len(resp.AccessKeyID) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissKey,
nil)
}
if len(resp.SecretAccessKey) == 0 {
return credentials.Value{ProviderName: ProviderName}, awserr.New(
ErrCodeProcessProviderRequired,
errMsgProcessProviderMissSecret,
nil)
}
// Handle expiration
p.staticCreds = resp.Expiration == nil
if resp.Expiration != nil {
p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
}
return credentials.Value{
ProviderName: ProviderName,
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.SessionToken,
}, nil
}
// IsExpired returns true if the credentials retrieved are expired, or not yet
// retrieved.
func (p *ProcessProvider) IsExpired() bool {
if p.staticCreds {
return false
}
return p.Expiry.IsExpired()
}
// prepareCommand prepares the command to be executed.
func (p *ProcessProvider) prepareCommand() error {
var cmdArgs []string
if runtime.GOOS == "windows" {
cmdArgs = []string{"cmd.exe", "/C"}
} else {
cmdArgs = []string{"sh", "-c"}
}
if len(p.originalCommand) == 0 {
p.originalCommand = make([]string, len(p.command.Args))
copy(p.originalCommand, p.command.Args)
// reject an empty command, since executing it would otherwise succeed
if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
return awserr.New(
ErrCodeProcessProviderExecution,
fmt.Sprintf(
"%s: %s",
errMsgProcessProviderPrepareCmd,
errMsgProcessProviderEmptyCmd),
nil)
}
}
cmdArgs = append(cmdArgs, p.originalCommand...)
p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
p.command.Env = os.Environ()
return nil
}
// executeCredentialProcess starts the credential process on the OS and
// returns the results or an error.
func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
if err := p.prepareCommand(); err != nil {
return nil, err
}
// Setup the pipes
outReadPipe, outWritePipe, err := os.Pipe()
if err != nil {
return nil, awserr.New(
ErrCodeProcessProviderExecution,
errMsgProcessProviderPipe,
err)
}
p.command.Stderr = os.Stderr // display stderr on console for MFA
p.command.Stdout = outWritePipe // get creds json on process's stdout
p.command.Stdin = os.Stdin // enable stdin for MFA
output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
stdoutCh := make(chan error, 1)
go readInput(
io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
output,
stdoutCh)
execCh := make(chan error, 1)
go executeCommand(*p.command, execCh)
finished := false
var errors []error
for !finished {
select {
case readError := <-stdoutCh:
errors = appendError(errors, readError)
finished = true
case execError := <-execCh:
err := outWritePipe.Close()
errors = appendError(errors, err)
errors = appendError(errors, execError)
if errors != nil {
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderProcess,
errors)
}
case <-time.After(p.Timeout):
finished = true
return output.Bytes(), awserr.NewBatchError(
ErrCodeProcessProviderExecution,
errMsgProcessProviderTimeout,
errors) // errors can be nil
}
}
out := output.Bytes()
if runtime.GOOS == "windows" {
// windows adds slashes to quotes
out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
}
return out, nil
}
// appendError conveniently checks for nil before appending to the slice
func appendError(errors []error, err error) []error {
if err != nil {
return append(errors, err)
}
return errors
}
func executeCommand(cmd exec.Cmd, exec chan error) {
// Start the command
err := cmd.Start()
if err == nil {
err = cmd.Wait()
}
exec <- err
}
func readInput(r io.Reader, w io.Writer, read chan error) {
tee := io.TeeReader(r, w)
_, err := ioutil.ReadAll(tee)
if err == io.EOF {
err = nil
}
read <- err // will only arrive here when write end of pipe is closed
}
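
For reference, a minimal sketch of how the provider above is wired up. The helper path is hypothetical; it just has to print JSON shaped like credentialProcessResponse on stdout, and per the validation in Retrieve the Version must be 1 and AccessKeyId/SecretAccessKey must be non-empty.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/processcreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Hypothetical helper; it must print, e.g.:
	//   {"Version":1,"AccessKeyId":"AKID","SecretAccessKey":"SECRET"}
	creds := processcreds.NewCredentials("/usr/local/bin/fetch-creds")
	sess, err := session.NewSession(&aws.Config{Credentials: creds})
	if err != nil {
		panic(err)
	}
	_ = sess
}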

View File

@ -4,9 +4,8 @@ import (
"fmt"
"os"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/ini"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
@ -77,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool {
// The credentials retrieved from the profile will be returned, or an error if
// the file cannot be read or the data is invalid.
func loadProfile(filename, profile string) (Value, error) {
config, err := ini.Load(filename)
config, err := ini.OpenFile(filename)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
iniProfile, ok := config.GetSection(profile)
if !ok {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
}
id, err := iniProfile.GetKey("aws_access_key_id")
if err != nil {
id := iniProfile.String("aws_access_key_id")
if len(id) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
err)
nil)
}
secret, err := iniProfile.GetKey("aws_secret_access_key")
if err != nil {
secret := iniProfile.String("aws_secret_access_key")
if len(secret) == 0 {
return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
nil)
}
// Default to empty string if not found
token := iniProfile.Key("aws_session_token")
token := iniProfile.String("aws_session_token")
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
ProviderName: SharedCredsProviderName,
}, nil
}
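
As a hedged reference, the shared credentials file parsed by loadProfile above is plain INI, and is normally loaded through the exported constructor. A minimal sketch (key values illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Assumes ~/.aws/credentials contains, e.g.:
	//   [default]
	//   aws_access_key_id     = AKID
	//   aws_secret_access_key = SECRET
	creds := credentials.NewSharedCredentials("", "default") // "" = default file location
	val, err := creds.Get()
	if err != nil {
		panic(err)
	}
	fmt.Println(val.ProviderName)
}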

View File

@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"
var (
// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
//
// @readonly
ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
)

View File

@ -80,16 +80,18 @@ package stscreds
import (
"fmt"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/internal/sdkrand"
"github.com/aws/aws-sdk-go/service/sts"
)
// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
// An error is returned if reading from stdin fails.
//
// Use this function to read MFA tokens from stdin. The function makes no attempt
@ -102,7 +104,7 @@ import (
// Will wait forever until something is provided on the stdin.
func StdinTokenProvider() (string, error) {
var v string
fmt.Printf("Assume Role MFA token code: ")
fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
_, err := fmt.Scanln(&v)
return v, err
@ -193,6 +195,18 @@ type AssumeRoleProvider struct {
//
// If ExpiryWindow is 0 or less it will be ignored.
ExpiryWindow time.Duration
// MaxJitterFrac reduces the effective Duration of each credential requested
// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
// have a value between 0 and 1. Any other value may lead to unexpected behavior.
// With a MaxJitterFrac value of 0 (the default), no jitter will be used.
//
// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
// AssumeRole call will be made with an arbitrary Duration between 27m and
// 30m.
//
// MaxJitterFrac should not be negative.
MaxJitterFrac float64
}
// NewCredentials returns a pointer to a new Credentials object wrapping the
@ -244,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
// Retrieve generates a new set of temporary credentials using STS.
func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Apply defaults where parameters are not set.
if p.RoleSessionName == "" {
// Try to work out a role name that will hopefully end up unique.
@ -254,8 +267,9 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
// Expire as often as AWS permits.
p.Duration = DefaultDuration
}
jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
input := &sts.AssumeRoleInput{
DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
RoleArn: aws.String(p.RoleARN),
RoleSessionName: aws.String(p.RoleSessionName),
ExternalId: p.ExternalID,
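
To make the jitter arithmetic above concrete, a small sketch under the documented bounds: with a Duration of 30m and a MaxJitterFrac of 0.1, the effective duration falls between 27m and 30m.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Mirrors the jitter computation in AssumeRoleProvider.Retrieve above,
	// using math/rand in place of the SDK's internal sdkrand.SeededRand.
	duration := 30 * time.Minute
	maxJitterFrac := 0.1
	jitter := time.Duration(rand.Float64() * maxJitterFrac * float64(duration))
	fmt.Println(duration - jitter) // effective duration in (27m, 30m]
}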

View File

@ -0,0 +1,100 @@
package stscreds
import (
"fmt"
"io/ioutil"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
)
const (
// ErrCodeWebIdentity will be used as an error code when constructing
// a new error to be returned during session creation or retrieval.
ErrCodeWebIdentity = "WebIdentityErr"
// WebIdentityProviderName is the web identity provider name
WebIdentityProviderName = "WebIdentityCredentials"
)
// now is used to return a time.Time object representing
// the current time. It can be swapped out in tests to make
// time values easy to control and compare.
var now = time.Now
// WebIdentityRoleProvider is used to retrieve credentials using
// an OIDC token.
type WebIdentityRoleProvider struct {
credentials.Expiry
client stsiface.STSAPI
ExpiryWindow time.Duration
tokenFilePath string
roleARN string
roleSessionName string
}
// NewWebIdentityCredentials will return a new set of credentials with a given
// configuration, role arn, and token file path.
func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
svc := sts.New(c)
p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
return credentials.NewCredentials(p)
}
// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
// provided stsiface.STSAPI
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
return &WebIdentityRoleProvider{
client: svc,
tokenFilePath: path,
roleARN: roleARN,
roleSessionName: roleSessionName,
}
}
// Retrieve will attempt to assume a role using the token located at the
// path given by 'WebIdentityTokenFilePath'; if the token cannot be read, an
// error will be returned.
func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
b, err := ioutil.ReadFile(p.tokenFilePath)
if err != nil {
errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
}
sessionName := p.roleSessionName
if len(sessionName) == 0 {
// A session name is used to uniquely identify a session; this simply
// uses the Unix time in nanoseconds as the identifier.
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
})
// InvalidIdentityToken error is a temporary error that can occur
// when assuming a role with a JWT web identity token.
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
if err := req.Send(); err != nil {
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
}
p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
value := credentials.Value{
AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),
SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
SessionToken: aws.StringValue(resp.Credentials.SessionToken),
ProviderName: WebIdentityProviderName,
}
return value, nil
}
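
A minimal usage sketch for the new provider, assuming a projected service account token on disk; the role ARN and token path below are hypothetical.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	// Role ARN and token file path are hypothetical.
	creds := stscreds.NewWebIdentityCredentials(sess,
		"arn:aws:iam::123456789012:role/example-role",
		"example-session",
		"/var/run/secrets/eks.amazonaws.com/serviceaccount/token")
	_ = s3.New(sess, &aws.Config{Credentials: creds})
}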

View File

@ -1,30 +1,61 @@
// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
// via UDP connection. Using the Start function will enable the reporting of
// metrics on a given port. If Start is called, with different parameters, again,
// a panic will occur.
// Package csm provides the Client Side Monitoring (CSM) client which enables
// sending metrics via UDP connection to the CSM agent. This package provides
// control options, and configuration for the CSM client. The client can be
// controlled manually, or automatically via the SDK's Session configuration.
//
// Pause can be called to pause any metrics publishing on a given port. Sessions
// that have had their handlers modified via InjectHandlers may still be used.
// However, the handlers will act as a no-op meaning no metrics will be published.
// Enabling CSM client via SDK's Session configuration
//
// The CSM client can be enabled automatically via SDK's Session configuration.
// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
// environment variable is set to a non-empty value.
//
// The configuration options for the CSM client via the SDK's session
// configuration are:
//
// * AWS_CSM_PORT=<port number>
// The port number the CSM agent will receive metrics on.
//
// * AWS_CSM_HOST=<hostname or ip>
// The hostname, or IP address the CSM agent will receive metrics on.
// Without port number.
//
// Manually enabling the CSM client
//
// The CSM client can be started, paused, and resumed manually. The Start
// function will enable the CSM client to publish metrics to the CSM agent. It
// is safe to call Start concurrently, but if Start is called additional times
// with different ClientID or address it will panic.
//
// Example:
// r, err := csm.Start("clientID", ":31000")
// if err != nil {
// panic(fmt.Errorf("failed starting CSM: %v", err))
// }
//
// When controlling the CSM client manually, you must also inject its request
// handlers into the SDK's Session configuration for the SDK's API clients to
// publish metrics.
//
// sess, err := session.NewSession(&aws.Config{})
// if err != nil {
// panic(fmt.Errorf("failed loading session: %v", err))
// }
//
// // Add CSM client's metric publishing request handlers to the SDK's
// // Session Configuration.
// r.InjectHandlers(&sess.Handlers)
//
// client := s3.New(sess)
// resp, err := client.GetObject(&s3.GetObjectInput{
// Bucket: aws.String("bucket"),
// Key: aws.String("key"),
// })
// Controlling CSM client
//
// Once the CSM client has been enabled the Get function will return a Reporter
// value that you can use to pause and resume the metrics published to the CSM
// agent. If the Get function is called before the reporter is enabled with the
// Start function or via the SDK's Session configuration, nil will be returned.
//
// The Pause method can be called to stop the CSM client publishing metrics to
// the CSM agent. The Continue method will resume metric publishing.
//
// // Get the CSM client Reporter.
// r := csm.Get()
//
// // Will pause monitoring
// r.Pause()
@ -35,12 +66,4 @@
//
// // Resume monitoring
// r.Continue()
//
// Start returns a Reporter that is used to enable or disable monitoring. If
// access to the Reporter is required later, calling Get will return the Reporter
// singleton.
//
// Example:
// r := csm.Get()
// r.Continue()
package csm

View File

@ -2,6 +2,7 @@ package csm
import (
"fmt"
"strings"
"sync"
)
@ -9,19 +10,40 @@ var (
lock sync.Mutex
)
// Client side metric handler names
const (
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
// DefaultPort is used when no port is specified.
DefaultPort = "31000"
// DefaultHost is the host that will be used when none is specified.
DefaultHost = "127.0.0.1"
)
// Start will start the a long running go routine to capture
// AddressWithDefaults returns a CSM address built from the host and port
// values. If the host or port is not set, default values will be used
// instead. If host is "localhost" it will be replaced with "127.0.0.1".
func AddressWithDefaults(host, port string) string {
if len(host) == 0 || strings.EqualFold(host, "localhost") {
host = DefaultHost
}
if len(port) == 0 {
port = DefaultPort
}
// Only an IPv6 host can contain a colon
if strings.Contains(host, ":") {
return "[" + host + "]:" + port
}
return host + ":" + port
}
// Start will start a long running go routine to capture
// client side metrics. Calling Start multiple times will only
// start the metric listener once and will panic if a different
// client ID or port is passed in.
//
// Example:
// r, err := csm.Start("clientID", "127.0.0.1:8094")
// r, err := csm.Start("clientID", "127.0.0.1:31000")
// if err != nil {
// panic(fmt.Errorf("expected no error, but received %v", err))
// }
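
A few illustrative inputs and outputs for AddressWithDefaults under the defaulting rules above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/csm"
)

func main() {
	fmt.Println(csm.AddressWithDefaults("", ""))          // 127.0.0.1:31000
	fmt.Println(csm.AddressWithDefaults("localhost", "")) // 127.0.0.1:31000
	fmt.Println(csm.AddressWithDefaults("::1", ""))       // [::1]:31000
}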

View File

@ -3,6 +3,8 @@ package csm
import (
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
)
type metricTime time.Time
@ -39,6 +41,12 @@ type metric struct {
SDKException *string `json:"SdkException,omitempty"`
SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
FinalAWSException *string `json:"FinalAwsException,omitempty"`
FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
FinalSDKException *string `json:"FinalSdkException,omitempty"`
FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
DestinationIP *string `json:"DestinationIp,omitempty"`
ConnectionReused *int `json:"ConnectionReused,omitempty"`
@ -48,4 +56,54 @@ type metric struct {
DNSLatency *int `json:"DnsLatency,omitempty"`
TCPLatency *int `json:"TcpLatency,omitempty"`
SSLLatency *int `json:"SslLatency,omitempty"`
MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
}
func (m *metric) TruncateFields() {
m.ClientID = truncateString(m.ClientID, 255)
m.UserAgent = truncateString(m.UserAgent, 256)
m.AWSException = truncateString(m.AWSException, 128)
m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
m.SDKException = truncateString(m.SDKException, 128)
m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
m.FinalAWSException = truncateString(m.FinalAWSException, 128)
m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
m.FinalSDKException = truncateString(m.FinalSDKException, 128)
m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
}
func truncateString(v *string, l int) *string {
if v != nil && len(*v) > l {
nv := (*v)[:l]
return &nv
}
return v
}
func (m *metric) SetException(e metricException) {
switch te := e.(type) {
case awsException:
m.AWSException = aws.String(te.exception)
m.AWSExceptionMessage = aws.String(te.message)
case sdkException:
m.SDKException = aws.String(te.exception)
m.SDKExceptionMessage = aws.String(te.message)
}
}
func (m *metric) SetFinalException(e metricException) {
switch te := e.(type) {
case awsException:
m.FinalAWSException = aws.String(te.exception)
m.FinalAWSExceptionMessage = aws.String(te.message)
case sdkException:
m.FinalSDKException = aws.String(te.exception)
m.FinalSDKExceptionMessage = aws.String(te.message)
}
}

View File

@ -16,25 +16,26 @@ var (
type metricChan struct {
ch chan metric
paused int64
paused *int64
}
func newMetricChan(size int) metricChan {
return metricChan{
ch: make(chan metric, size),
ch: make(chan metric, size),
paused: new(int64),
}
}
func (ch *metricChan) Pause() {
atomic.StoreInt64(&ch.paused, pausedEnum)
atomic.StoreInt64(ch.paused, pausedEnum)
}
func (ch *metricChan) Continue() {
atomic.StoreInt64(&ch.paused, runningEnum)
atomic.StoreInt64(ch.paused, runningEnum)
}
func (ch *metricChan) IsPaused() bool {
v := atomic.LoadInt64(&ch.paused)
v := atomic.LoadInt64(ch.paused)
return v == pausedEnum
}
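
A toy sketch of the pause semantics above. metricChan is package-internal, so this is illustrative only and assumes the running state is the zero value of *int64.

// In-package sketch (csm internals; not callable from outside):
func exampleMetricChanPause() {
	ch := newMetricChan(10)
	ch.Pause()
	fmt.Println(ch.IsPaused()) // true
	ch.Continue()
	fmt.Println(ch.IsPaused()) // false
}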

View File

@ -0,0 +1,26 @@
package csm
type metricException interface {
Exception() string
Message() string
}
type requestException struct {
exception string
message string
}
func (e requestException) Exception() string {
return e.exception
}
func (e requestException) Message() string {
return e.message
}
type awsException struct {
requestException
}
type sdkException struct {
requestException
}

View File

@ -10,11 +10,6 @@ import (
"github.com/aws/aws-sdk-go/aws/request"
)
const (
// DefaultPort is used when no port is specified
DefaultPort = "31000"
)
// Reporter will gather metrics of API requests made and
// send those metrics to the CSM endpoint.
type Reporter struct {
@ -71,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
XAmzRequestID: aws.String(r.RequestID),
AttemptCount: aws.Int(r.RetryCount + 1),
AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
AccessKey: aws.String(creds.AccessKeyID),
}
@ -82,26 +76,29 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
setError(&m, awserr)
m.SetException(getMetricException(awserr))
}
}
m.TruncateFields()
rep.metricsCh.Push(m)
}
func setError(m *metric, err awserr.Error) {
func getMetricException(err awserr.Error) metricException {
msg := err.Error()
code := err.Code()
switch code {
case "RequestError",
"SerializationError",
request.ErrCodeSerialization,
request.CanceledErrorCode:
m.SDKException = &code
m.SDKExceptionMessage = &msg
return sdkException{
requestException{exception: code, message: msg},
}
default:
m.AWSException = &code
m.AWSExceptionMessage = &msg
return awsException{
requestException{exception: code, message: msg},
}
}
}
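
Illustrative of the classification above: SDK-level transport codes map to sdkException, everything else to awsException. A package-internal sketch:

// In-package sketch (csm internals; illustrative only):
func exampleExceptionClassification() {
	m := &metric{}
	// Transport-level codes map to sdkException...
	m.SetException(getMetricException(
		awserr.New(request.CanceledErrorCode, "request canceled", nil)))
	// ...while service error codes map to awsException.
	m.SetFinalException(getMetricException(
		awserr.New("AccessDenied", "access denied", nil)))
}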
@ -112,16 +109,31 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
now := time.Now()
m := metric{
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
ClientID: aws.String(rep.clientID),
API: aws.String(r.Operation.Name),
Service: aws.String(r.ClientInfo.ServiceID),
Timestamp: (*metricTime)(&now),
UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
Type: aws.String("ApiCall"),
AttemptCount: aws.Int(r.RetryCount + 1),
Region: r.Config.Region,
Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
XAmzRequestID: aws.String(r.RequestID),
MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
}
if r.HTTPResponse != nil {
m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
}
if r.Error != nil {
if awserr, ok := r.Error.(awserr.Error); ok {
m.SetFinalException(getMetricException(awserr))
}
}
m.TruncateFields()
// TODO: Probably want to figure something out for logging dropped
// metrics
rep.metricsCh.Push(m)
@ -172,8 +184,9 @@ func (rep *Reporter) start() {
}
}
// Pause will pause the metric channel preventing any new metrics from
// being added.
// Pause will pause the metric channel preventing any new metrics from being
// added. It is safe to call concurrently with other calls to Pause, but
// calling it concurrently with Continue can lead to unexpected state.
func (rep *Reporter) Pause() {
lock.Lock()
defer lock.Unlock()
@ -185,8 +198,9 @@ func (rep *Reporter) Pause() {
rep.close()
}
// Continue will reopen the metric channel and allow for monitoring
// to be resumed.
// Continue will reopen the metric channel and allow for monitoring to be
// resumed. It is safe to call concurrently with other calls to Continue, but
// calling it concurrently with Pause can lead to unexpected state.
func (rep *Reporter) Continue() {
lock.Lock()
defer lock.Unlock()
@ -201,10 +215,18 @@ func (rep *Reporter) Continue() {
rep.metricsCh.Continue()
}
// Client side metric handler names
const (
APICallMetricHandlerName = "awscsm.SendAPICallMetric"
APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
)
// InjectHandlers will enable client side metrics and inject the proper
// handlers to handle how metrics are sent.
//
// Example:
// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
//
// // Start must be called in order to inject the correct handlers
// r, err := csm.Start("clientID", "127.0.0.1:8094")
// if err != nil {
@ -221,11 +243,22 @@ func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
return
}
apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric}
apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric}
handlers.Complete.PushFrontNamed(request.NamedHandler{
Name: APICallMetricHandlerName,
Fn: rep.sendAPICallMetric,
})
handlers.Complete.PushFrontNamed(apiCallHandler)
handlers.Complete.PushFrontNamed(apiCallAttemptHandler)
handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler)
handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
Name: APICallAttemptMetricHandlerName,
Fn: rep.sendAPICallAttemptMetric,
})
}
// boolIntValue return 1 for true and 0 for false.
func boolIntValue(b bool) int {
if b {
return 1
}
return 0
}

View File

@ -24,6 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// A Defaults provides a collection of default values for SDK clients.
@ -112,8 +113,8 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro
}
const (
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
)
// RemoteCredProvider returns a credentials provider for the default remote
@ -123,8 +124,8 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P
return localHTTPCredProvider(cfg, handlers, u)
}
if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("http://169.254.170.2%s", uri)
if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
return httpCredProvider(cfg, handlers, u)
}
@ -187,6 +188,7 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede
return endpointcreds.NewProviderClient(cfg, handlers, u,
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
},
)
}
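
A hedged sketch of how the environment variables above drive provider selection; the relative URI value is hypothetical and resolves against the ECS container credentials endpoint (http://169.254.170.2).

package main

import (
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Hypothetical ECS-style relative URI.
	os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/v2/credentials/example")
	provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
	_ = credentials.NewCredentials(provider)
}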

View File

@ -24,8 +24,9 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
return output.Content, req.Send()
return output.Content, err
}
// GetUserData returns the userdata that was configured for the service. If
@ -45,8 +46,9 @@ func (c *EC2Metadata) GetUserData() (string, error) {
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
}
})
err := req.Send()
return output.Content, req.Send()
return output.Content, err
}
// GetDynamicData uses the path provided to request information from the EC2
@ -61,8 +63,9 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
err := req.Send()
return output.Content, req.Send()
return output.Content, err
}
// GetInstanceIdentityDocument retrieves an identity document describing an
@ -79,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument
doc := EC2InstanceIdentityDocument{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
return EC2InstanceIdentityDocument{},
awserr.New("SerializationError",
awserr.New(request.ErrCodeSerialization,
"failed to decode EC2 instance identity document", err)
}
@ -98,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
info := EC2IAMInfo{}
if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
return EC2IAMInfo{},
awserr.New("SerializationError",
awserr.New(request.ErrCodeSerialization,
"failed to decode EC2 IAM info", err)
}
@ -118,6 +121,10 @@ func (c *EC2Metadata) Region() (string, error) {
return "", err
}
if len(resp) == 0 {
return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
}
// returns the region without the availability zone suffix, e.g. us-west-2a becomes us-west-2
return resp[:len(resp)-1], nil
}
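
The suffix strip above is a single-byte trim of the availability zone letter:

package main

import "fmt"

func main() {
	resp := "us-west-2a"            // availability zone from the metadata service
	fmt.Println(resp[:len(resp)-1]) // us-west-2
}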
@ -145,18 +152,19 @@ type EC2IAMInfo struct {
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
// an instance identity document
type EC2InstanceIdentityDocument struct {
DevpayProductCodes []string `json:"devpayProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
DevpayProductCodes []string `json:"devpayProductCodes"`
MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
AvailabilityZone string `json:"availabilityZone"`
PrivateIP string `json:"privateIp"`
Version string `json:"version"`
Region string `json:"region"`
InstanceID string `json:"instanceId"`
BillingProducts []string `json:"billingProducts"`
InstanceType string `json:"instanceType"`
AccountID string `json:"accountId"`
PendingTime time.Time `json:"pendingTime"`
ImageID string `json:"imageId"`
KernelID string `json:"kernelId"`
RamdiskID string `json:"ramdiskId"`
Architecture string `json:"architecture"`
}
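
A hedged sketch of decoding an instance identity document into the struct above; the field values are illustrative, and real documents come from the metadata service.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
)

func main() {
	doc := []byte(`{"region":"us-west-2","instanceId":"i-0123456789abcdef0","accountId":"123456789012"}`)
	var ident ec2metadata.EC2InstanceIdentityDocument
	if err := json.Unmarshal(doc, &ident); err != nil {
		panic(err)
	}
	fmt.Println(ident.Region, ident.InstanceID)
}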

View File

@ -4,7 +4,7 @@
// This package's client can be disabled completely by setting the environment
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environemnt variable is set to true, (case insensitive).
// be used while the environment variable is set to true, (case insensitive).
package ec2metadata
import (
@ -72,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceName,
Endpoint: endpoint,
APIVersion: "latest",
},
@ -91,6 +92,9 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
svc.Handlers.Send.SwapNamed(request.NamedHandler{
Name: corehandlers.SendHandler.Name,
Fn: func(r *request.Request) {
r.HTTPResponse = &http.Response{
Header: http.Header{},
}
r.Error = awserr.New(
request.CanceledErrorCode,
"EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
@ -119,7 +123,7 @@ func unmarshalHandler(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
return
}
@ -132,7 +136,7 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
b := &bytes.Buffer{}
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
return
}

Some files were not shown because too many files have changed in this diff.