From 4b7e2b3a064fe0d558d2a87173bc50e7de31f441 Mon Sep 17 00:00:00 2001 From: dlorenc Date: Thu, 21 Feb 2019 14:09:22 -0600 Subject: [PATCH 01/22] Update the cache warmer to also save manifests. (#576) --- pkg/cache/warm.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/cache/warm.go b/pkg/cache/warm.go index 02bb550fe..c03746e0b 100644 --- a/pkg/cache/warm.go +++ b/pkg/cache/warm.go @@ -18,6 +18,7 @@ package cache import ( "fmt" + "io/ioutil" "path" "github.com/GoogleContainerTools/kaniko/pkg/config" @@ -53,6 +54,15 @@ func WarmCache(opts *config.WarmerOptions) error { if err != nil { return errors.Wrap(err, fmt.Sprintf("Failed to write %s to cache", image)) } + + mfst, err := img.RawManifest() + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Failed to retrieve manifest for %s", image)) + } + mfstPath := cachePath + ".json" + if err := ioutil.WriteFile(mfstPath, mfst, 0666); err != nil { + return errors.Wrap(err, fmt.Sprintf("Failed to save manifest for %s", image)) + } logrus.Debugf("Wrote %s to cache", image) } return nil From 1d079e683e28606162e6340d5ccf844055a6d575 Mon Sep 17 00:00:00 2001 From: Valentin Rothberg Date: Mon, 25 Feb 2019 16:52:53 +0100 Subject: [PATCH 02/22] README.me: update Buildah description (#586) Use the latest description of Buildah from upstream. Signed-off-by: Valentin Rothberg --- README.md | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 065bfa985..a4206963e 100644 --- a/README.md +++ b/README.md @@ -467,12 +467,15 @@ filesystem is sufficiently complicated). However it has no `Dockerfile`-like build tooling (it's a slightly lower-level tool that can be used to build such builders -- such as `orca-build`). -`Buildah` can run as a non root user and does not require privileges. Buildah -specializes in building OCI images. Buildah's commands replicate all of the -commands that are found in a Dockerfile. 
Its goal is also to provide a lower -level coreutils interface to build images, allowing people to build containers -without requiring a Dockerfile. The intent with Buildah is to allow other -scripting languages to build container images, without requiring a daemon. +`Buildah` specializes in building OCI images. Buildah's commands replicate all +of the commands that are found in a Dockerfile. This allows building images +with and without Dockerfiles while not requiring any root privileges. +Buildah’s ultimate goal is to provide a lower-level coreutils interface to +build images. The flexibility of building images without Dockerfiles allows +for the integration of other scripting languages into the build process. +Buildah follows a simple fork-exec model and does not run as a daemon +but it is based on a comprehensive API in golang, which can be vendored +into other tools. `FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images for a subset of images. These can be thought of as a special-case "fast path" From 2abe109eb26ef540aa7389f17970e4e2c773b07e Mon Sep 17 00:00:00 2001 From: dlorenc Date: Mon, 25 Feb 2019 10:42:34 -0600 Subject: [PATCH 03/22] Environment variables should be replaced in URLs in ADD commands. (#580) We were previously explicitly skipping this for some reason, but Docker seems to expand these in URLs so we should too. 
--- integration/dockerfiles/Dockerfile_test_add | 5 +- pkg/commands/add.go | 5 +- pkg/util/command_util.go | 20 +++-- pkg/util/command_util_test.go | 96 +++++++++++++++++---- 4 files changed, 100 insertions(+), 26 deletions(-) diff --git a/integration/dockerfiles/Dockerfile_test_add b/integration/dockerfiles/Dockerfile_test_add index 65f530ad1..3df0c5864 100644 --- a/integration/dockerfiles/Dockerfile_test_add +++ b/integration/dockerfiles/Dockerfile_test_add @@ -24,4 +24,7 @@ COPY $file /arg # Finally, test adding a remote URL, concurrently with a normal file ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/ -ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /destination + +# Test environment replacement in the URL +ENV VERSION=v1.4.3 +ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/${VERSION}-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /destination diff --git a/pkg/commands/add.go b/pkg/commands/add.go index 7bcbce810..b66b56db2 100644 --- a/pkg/commands/add.go +++ b/pkg/commands/add.go @@ -61,7 +61,10 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui for _, src := range srcs { fullPath := filepath.Join(a.buildcontext, src) if util.IsSrcRemoteFileURL(src) { - urlDest := util.URLDestinationFilepath(src, dest, config.WorkingDir) + urlDest, err := util.URLDestinationFilepath(src, dest, config.WorkingDir, replacementEnvs) + if err != nil { + return err + } logrus.Infof("Adding remote URL %s to %s", src, urlDest) if err := util.DownloadFileToDest(src, urlDest); err != nil { return err diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index c217fc74d..e64fadf55 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -37,11 +37,13 @@ import ( func 
ResolveEnvironmentReplacementList(values, envs []string, isFilepath bool) ([]string, error) { var resolvedValues []string for _, value := range values { + var resolved string + var err error if IsSrcRemoteFileURL(value) { - resolvedValues = append(resolvedValues, value) - continue + resolved, err = ResolveEnvironmentReplacement(value, envs, false) + } else { + resolved, err = ResolveEnvironmentReplacement(value, envs, isFilepath) } - resolved, err := ResolveEnvironmentReplacement(value, envs, isFilepath) logrus.Debugf("Resolved %s to %s", value, resolved) if err != nil { return nil, err @@ -165,20 +167,24 @@ func DestinationFilepath(src, dest, cwd string) (string, error) { } // URLDestinationFilepath gives the destination a file from a remote URL should be saved to -func URLDestinationFilepath(rawurl, dest, cwd string) string { +func URLDestinationFilepath(rawurl, dest, cwd string, envs []string) (string, error) { if !IsDestDir(dest) { if !filepath.IsAbs(dest) { - return filepath.Join(cwd, dest) + return filepath.Join(cwd, dest), nil } - return dest + return dest, nil } urlBase := filepath.Base(rawurl) + urlBase, err := ResolveEnvironmentReplacement(urlBase, envs, true) + if err != nil { + return "", err + } destPath := filepath.Join(dest, urlBase) if !filepath.IsAbs(dest) { destPath = filepath.Join(cwd, destPath) } - return destPath + return destPath, nil } func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []string, root string) error { diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go index c4c6fead4..f7a4bf211 100644 --- a/pkg/util/command_util_test.go +++ b/pkg/util/command_util_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package util import ( + "reflect" "sort" "testing" @@ -27,14 +28,12 @@ var testURL = "https://github.com/GoogleContainerTools/runtimes-common/blob/mast var testEnvReplacement = []struct { path string - command string envs []string isFilepath bool expectedPath string }{ { - path: "/simple/path", - command: "WORKDIR /simple/path", + path: "/simple/path", envs: []string{ "simple=/path/", }, @@ -42,8 +41,7 @@ var testEnvReplacement = []struct { expectedPath: "/simple/path", }, { - path: "/simple/path/", - command: "WORKDIR /simple/path/", + path: "/simple/path/", envs: []string{ "simple=/path/", }, @@ -51,8 +49,7 @@ var testEnvReplacement = []struct { expectedPath: "/simple/path/", }, { - path: "${a}/b", - command: "WORKDIR ${a}/b", + path: "${a}/b", envs: []string{ "a=/path/", "b=/path2/", @@ -61,8 +58,7 @@ var testEnvReplacement = []struct { expectedPath: "/path/b", }, { - path: "/$a/b", - command: "COPY ${a}/b /c/", + path: "/$a/b", envs: []string{ "a=/path/", "b=/path2/", @@ -71,8 +67,7 @@ var testEnvReplacement = []struct { expectedPath: "/path/b", }, { - path: "/$a/b/", - command: "COPY /${a}/b /c/", + path: "/$a/b/", envs: []string{ "a=/path/", "b=/path2/", @@ -81,8 +76,7 @@ var testEnvReplacement = []struct { expectedPath: "/path/b/", }, { - path: "\\$foo", - command: "COPY \\$foo /quux", + path: "\\$foo", envs: []string{ "foo=/path/", }, @@ -90,8 +84,14 @@ var testEnvReplacement = []struct { expectedPath: "$foo", }, { - path: "8080/$protocol", - command: "EXPOSE 8080/$protocol", + path: "8080/$protocol", + envs: []string{ + "protocol=udp", + }, + expectedPath: "8080/udp", + }, + { + path: "8080/$protocol", envs: []string{ "protocol=udp", }, @@ -183,6 +183,7 @@ var urlDestFilepathTests = []struct { cwd string dest string expectedDest string + envs []string }{ { url: "https://something/something", @@ -202,12 +203,19 @@ var urlDestFilepathTests = []struct { dest: "/dest/", expectedDest: "/dest/something", }, + { + url: "https://something/$foo.tar.gz", + cwd: 
"/test", + dest: "/foo/", + expectedDest: "/foo/bar.tar.gz", + envs: []string{"foo=bar"}, + }, } func Test_UrlDestFilepath(t *testing.T) { for _, test := range urlDestFilepathTests { - actualDest := URLDestinationFilepath(test.url, test.dest, test.cwd) - testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedDest, actualDest) + actualDest, err := URLDestinationFilepath(test.url, test.dest, test.cwd, test.envs) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedDest, actualDest) } } @@ -448,3 +456,57 @@ func Test_RemoteUrls(t *testing.T) { } } + +func TestResolveEnvironmentReplacementList(t *testing.T) { + type args struct { + values []string + envs []string + isFilepath bool + } + tests := []struct { + name string + args args + want []string + wantErr bool + }{ + { + name: "url", + args: args{ + values: []string{ + "https://google.com/$foo", "$bar", + }, + envs: []string{ + "foo=baz", + "bar=bat", + }, + }, + want: []string{"https://google.com/baz", "bat"}, + }, + { + name: "mixed", + args: args{ + values: []string{ + "$foo", "$bar$baz", "baz", + }, + envs: []string{ + "foo=FOO", + "bar=BAR", + "baz=BAZ", + }, + }, + want: []string{"FOO", "BARBAZ", "baz"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ResolveEnvironmentReplacementList(tt.args.values, tt.args.envs, tt.args.isFilepath) + if (err != nil) != tt.wantErr { + t.Errorf("ResolveEnvironmentReplacementList() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ResolveEnvironmentReplacementList() = %v, want %v", got, tt.want) + } + }) + } +} From f6f26dfe806207c1b1afbeea4db79d106a16b260 Mon Sep 17 00:00:00 2001 From: Anthony Weston Date: Sun, 3 Mar 2019 10:38:42 -0500 Subject: [PATCH 04/22] Added missing documentation for --skip-tls-verify-pull arg --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a4206963e..10810464f 100644 --- 
a/README.md +++ b/README.md @@ -391,7 +391,11 @@ This flag takes a single snapshot of the filesystem at the end of the build, so #### --skip-tls-verify -Set this flag to skip TLS certificate validation when connecting to a registry. It is supposed to be used for testing purposes only and should not be used in production! +Set this flag to skip TLS certificate validation when pushing to a registry. It is supposed to be used for testing purposes only and should not be used in production! + +#### --skip-tls-verify-pull + +Set this flag to skip TLS certificate validation when pulling from a registry. It is supposed to be used for testing purposes only and should not be used in production! #### --snapshotMode From 6b1ac2ac9638ad85c6f9861aaa0110e5f8e1f226 Mon Sep 17 00:00:00 2001 From: Anthony Weston Date: Mon, 4 Mar 2019 18:14:35 -0500 Subject: [PATCH 05/22] Fixed spelling error --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 10810464f..f6071e551 100644 --- a/README.md +++ b/README.md @@ -362,7 +362,7 @@ You can set it multiple times for multiple registries. #### --skip-tls-verify-registry -Set this flag to skip TLS cerificate validation when accessing a registry. It is supposed to be useed for testing purposes only and should not be used in production! +Set this flag to skip TLS certificate validation when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production! You can set it multiple times for multiple registries. #### --cleanup From 969321521ebae1455a827da03ce664a2fca01761 Mon Sep 17 00:00:00 2001 From: priyawadhwa Date: Wed, 6 Mar 2019 10:39:51 -0800 Subject: [PATCH 06/22] Update go-containerregistry (#599) * Update go-containerregistry Update go-containerregistry since it can now handle image names of the format repo:tag@digest. Should fix #535. Thanks @ViceIce for the fix!
* update go-containerregistry again --- Gopkg.lock | 11 +- Gopkg.toml | 4 +- .../go-containerregistry/pkg/authn/helper.go | 16 +- .../pkg/authn/k8schain/doc.go | 2 +- .../pkg/authn/k8schain/k8schain.go | 2 +- .../go-containerregistry/pkg/name/digest.go | 16 +- .../go-containerregistry/pkg/name/registry.go | 20 ++ .../go-containerregistry/pkg/v1/config.go | 25 +- .../pkg/v1/daemon/image.go | 8 +- .../pkg/v1/daemon/options.go | 2 + .../pkg/v1/daemon/write.go | 21 +- .../google/go-containerregistry/pkg/v1/doc.go | 4 +- .../pkg/v1/empty/index.go | 59 ++++ .../go-containerregistry/pkg/v1/hash.go | 2 +- .../go-containerregistry/pkg/v1/image.go | 3 - .../go-containerregistry/pkg/v1/index.go | 11 +- .../pkg/v1/mutate/mutate.go | 222 ++++++++----- .../pkg/v1/mutate/rebase.go | 3 +- .../pkg/v1/partial/compressed.go | 12 +- .../pkg/v1/partial/uncompressed.go | 14 +- .../pkg/v1/partial/with.go | 25 +- .../pkg/v1/random/image.go | 17 +- .../pkg/v1/random/index.go | 106 +++++++ .../pkg/v1/remote/image.go | 113 ++++--- .../pkg/v1/remote/index.go | 139 ++++++++ .../pkg/v1/remote/list.go | 12 +- .../pkg/v1/remote/mount.go | 2 +- .../pkg/v1/remote/transport/bearer.go | 11 +- .../pkg/v1/remote/{ => transport}/error.go | 13 +- .../pkg/v1/remote/transport/ping.go | 68 ++-- .../pkg/v1/remote/transport/transport.go | 1 + .../pkg/v1/remote/write.go | 300 +++++++++++++----- .../pkg/v1/stream/layer.go | 194 +++++++++++ .../pkg/v1/tarball/image.go | 4 +- .../pkg/v1/tarball/layer.go | 2 +- .../pkg/v1/tarball/write.go | 2 +- .../go-containerregistry/pkg/v1/v1util/nop.go | 40 --- .../pkg/v1/v1util/verify.go | 2 +- .../go-containerregistry/pkg/v1/v1util/zip.go | 52 +-- .../plugin/pkg/client/auth/exec/exec.go | 18 +- .../client-go/transport/round_trippers.go | 111 ++++++- 41 files changed, 1240 insertions(+), 449 deletions(-) create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go create mode 100644 
vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go rename vendor/github.com/google/go-containerregistry/pkg/v1/remote/{ => transport}/error.go (90%) create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go delete mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go diff --git a/Gopkg.lock b/Gopkg.lock index 8aa683042..7d5486b54 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -430,7 +430,7 @@ version = "v0.2.0" [[projects]] - digest = "1:f1b23f53418c1b035a5965ac2600a28b16c08643683d5213fb581ecf4e79a02a" + digest = "1:a4f41b57b6a09cf498024fd9d2872b99c32bfc1462a8f34ac625e88531d52930" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -444,12 +444,13 @@ "pkg/v1/random", "pkg/v1/remote", "pkg/v1/remote/transport", + "pkg/v1/stream", "pkg/v1/tarball", "pkg/v1/types", "pkg/v1/v1util", ] pruneopts = "NUT" - revision = "88d8d18eb1bde1fcef23c745205c738074290515" + revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457" [[projects]] digest = "1:f4f203acd8b11b8747bdcd91696a01dbc95ccb9e2ca2db6abf81c3a4f5e950ce" @@ -1102,7 +1103,7 @@ version = "kubernetes-1.11.0" [[projects]] - digest = "1:b960fc62d636ccdc3265dd1e190b7f5e7bf5f8d29bf4f02af7f1352768c58f3f" + digest = "1:2f523dd16b56091fab1f329f772c3540742920e270bf0f9b8451106b7f005a66" name = "k8s.io/client-go" packages = [ "discovery", @@ -1154,8 +1155,8 @@ "util/integer", ] pruneopts = "NUT" - revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65" - version = "kubernetes-1.11.0" + revision = "2cefa64ff137e128daeddbd1775cd775708a05bf" + version = "kubernetes-1.11.3" [[projects]] digest = "1:e345c95cf277bb7f650306556904df69e0904395c56959a56002d0140747eda0" diff --git a/Gopkg.toml b/Gopkg.toml index 9431660cc..2ab371521 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -33,11 +33,11 @@ required = [ [[constraint]] name = "k8s.io/client-go" - version = 
"kubernetes-1.11.0" + version = "kubernetes-1.11.3" [[constraint]] name = "github.com/google/go-containerregistry" - revision = "88d8d18eb1bde1fcef23c745205c738074290515" + revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457" [[override]] name = "k8s.io/apimachinery" diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go index 4a8ec2404..c8ba4e6e2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go @@ -72,7 +72,7 @@ func (h *helper) Authorization() (string, error) { var out bytes.Buffer cmd.Stdout = &out - err := h.r.Run(cmd) + cmdErr := h.r.Run(cmd) // If we see this specific message, it means the domain wasn't found // and we should fall back on anonymous auth. @@ -81,16 +81,22 @@ func (h *helper) Authorization() (string, error) { return Anonymous.Authorization() } - if err != nil { - return "", err - } - // Any other output should be parsed as JSON and the Username / Secret // fields used for Basic authentication. ho := helperOutput{} if err := json.Unmarshal([]byte(output), &ho); err != nil { + if cmdErr != nil { + // If we failed to parse output, it won't contain Secret, so returning it + // in an error should be fine. 
+ return "", fmt.Errorf("invoking %s: %v; output: %s", helperName, cmdErr, output) + } return "", err } + + if cmdErr != nil { + return "", fmt.Errorf("invoking %s: %v", helperName, cmdErr) + } + b := Basic{Username: ho.Username, Password: ho.Secret} return b.Authorization() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go index bc10365a0..c9ae7f128 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/doc.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// package k8schain exposes an implementation of the authn.Keychain interface +// Package k8schain exposes an implementation of the authn.Keychain interface // based on the semantics the Kubelet follows when pulling the images for a // Pod in Kubernetes. 
package k8schain diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go index 667b09c1b..d90ac4d1d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/k8schain/k8schain.go @@ -17,7 +17,7 @@ package k8schain import ( "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index ea6287a84..dc573ef1d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -73,14 +73,14 @@ func NewDigest(name string, strict Strictness) (Digest, error) { base := parts[0] digest := parts[1] - // We don't require a digest, but if we get one check it's valid, - // even when not being strict. - // If we are being strict, we want to validate the digest regardless in case - // it's empty. - if digest != "" || strict == StrictValidation { - if err := checkDigest(digest); err != nil { - return Digest{}, err - } + // Always check that the digest is valid. 
+ if err := checkDigest(digest); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, strict) + if err == nil { + base = tag.Repository.Name() } repo, err := NewRepository(base, strict) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index c2bf5758a..ab7419308 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -15,12 +15,14 @@ package name import ( + "net" "net/url" "regexp" "strings" ) const ( + // DefaultRegistry is Docker Hub, assumed when a hostname is omitted. DefaultRegistry = "index.docker.io" defaultRegistryAlias = "docker.io" ) @@ -63,11 +65,29 @@ func (r Registry) Scope(string) string { return "registry:catalog:*" } +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + // Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. func (r Registry) Scheme() string { if r.insecure { return "http" } + if r.isRFC1918() { + return "http" + } if strings.HasPrefix(r.Name(), "localhost:") { return "http" } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index d1d809d91..3d8d6d30d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -21,27 +21,28 @@ import ( ) // ConfigFile is the configuration file that holds the metadata describing -// how to launch a container. 
The names of the fields are chosen to reflect -// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md type ConfigFile struct { Architecture string `json:"architecture"` - Container string `json:"container"` - Created Time `json:"created"` - DockerVersion string `json:"docker_version"` - History []History `json:"history"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` OS string `json:"os"` RootFS RootFS `json:"rootfs"` Config Config `json:"config"` - ContainerConfig Config `json:"container_config"` - OSVersion string `json:"osversion"` + ContainerConfig Config `json:"container_config,omitempty"` + OSVersion string `json:"osversion,omitempty"` } // History is one entry of a list recording how this container image was built. 
type History struct { - Author string `json:"author"` - Created Time `json:"created"` - CreatedBy string `json:"created_by"` - Comment string `json:"comment"` + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` EmptyLayer bool `json:"empty_layer,omitempty"` } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go index e013a2258..b7f5f3ef6 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go @@ -20,11 +20,10 @@ import ( "io" "io/ioutil" - "github.com/google/go-containerregistry/pkg/v1/tarball" - "github.com/docker/docker/client" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" ) // image accesses an image from a docker daemon @@ -42,6 +41,7 @@ type imageOpener struct { buffered bool } +// ImageOption is a functional option for Image. type ImageOption func(*imageOpener) error func (i *imageOpener) Open() (v1.Image, error) { @@ -66,7 +66,7 @@ func (i *imageOpener) Open() (v1.Image, error) { return img, nil } -// API interface for testing. +// ImageSaver is an interface for testing. type ImageSaver interface { ImageSave(context.Context, []string) (io.ReadCloser, error) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go index 393507660..4e03952ee 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go @@ -14,12 +14,14 @@ package daemon +// WithBufferedOpener buffers the image. 
func WithBufferedOpener() ImageOption { return func(i *imageOpener) error { return i.setBuffered(true) } } +// WithUnbufferedOpener streams the image to avoid buffering. func WithUnbufferedOpener() ImageOption { return func(i *imageOpener) error { return i.setBuffered(false) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go index 7310b7057..09e9bbdb5 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go @@ -19,22 +19,21 @@ import ( "io" "io/ioutil" - "github.com/pkg/errors" - "github.com/docker/docker/api/types" "github.com/docker/docker/client" - "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" ) -// API interface for testing. +// ImageLoader is an interface for testing. type ImageLoader interface { ImageLoad(context.Context, io.Reader, bool) (types.ImageLoadResponse, error) + ImageTag(context.Context, string, string) error } -// This is a variable so we can override in tests. +// GetImageLoader is a variable so we can override in tests. var GetImageLoader = func() (ImageLoader, error) { cli, err := client.NewEnvClient() if err != nil { @@ -44,6 +43,16 @@ var GetImageLoader = func() (ImageLoader, error) { return cli, nil } +// Tag adds a tag to an already existent image. +func Tag(src, dest name.Tag) error { + cli, err := GetImageLoader() + if err != nil { + return err + } + + return cli.ImageTag(context.Background(), src.String(), dest.String()) +} + // Write saves the image into the daemon as the given tag. 
func Write(tag name.Tag, img v1.Image) (string, error) { cli, err := GetImageLoader() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go index c9b203173..7273ec5ab 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package v1 defines structured types for OCI v1 images +//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . // +k8s:deepcopy-gen=package -//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . +// Package v1 defines structured types for OCI v1 images package v1 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go new file mode 100644 index 000000000..83dc58ab7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package empty + +import ( + "encoding/json" + "errors" + + "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Index is a singleton empty index, think: FROM scratch. +var Index = emptyIndex{} + +type emptyIndex struct{} + +func (i emptyIndex) MediaType() (types.MediaType, error) { + return types.OCIImageIndex, nil +} + +func (i emptyIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) { + return &v1.IndexManifest{ + SchemaVersion: 2, + }, nil +} + +func (i emptyIndex) RawManifest() ([]byte, error) { + im, err := i.IndexManifest() + if err != nil { + return nil, err + } + return json.Marshal(im) +} + +func (i emptyIndex) Image(v1.Hash) (v1.Image, error) { + return nil, errors.New("empty index") +} + +func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) { + return nil, errors.New("empty index") +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go index f0db0d51c..40933030d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -49,7 +49,7 @@ func NewHash(s string) (Hash, error) { } // MarshalJSON implements json.Marshaler -func (h *Hash) MarshalJSON() ([]byte, error) { +func (h Hash) MarshalJSON() ([]byte, error) { return json.Marshal(h.String()) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go index 05568aae0..17b9839a6 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -24,9 +24,6 @@ type Image interface { // The order of the list is oldest/base layer first, and most-recent/top layer last. 
Layers() ([]Layer, error) - // BlobSet returns an unordered collection of all the blobs in the image. - BlobSet() (map[Hash]struct{}, error) - // MediaType of this image's manifest. MediaType() (types.MediaType, error) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go index 25ba29ed7..604e6de36 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -18,6 +18,7 @@ import ( "github.com/google/go-containerregistry/pkg/v1/types" ) +// ImageIndex defines the interface for interacting with an OCI image index. type ImageIndex interface { // MediaType of this image's manifest. MediaType() (types.MediaType, error) @@ -28,6 +29,12 @@ type ImageIndex interface { // IndexManifest returns this image index's manifest object. IndexManifest() (*IndexManifest, error) - // RawIndexManifest returns the serialized bytes of IndexManifest(). - RawIndexManifest() ([]byte, error) + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. 
+ ImageIndex(Hash) (ImageIndex, error) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go index b24d6896b..a327e7594 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -26,9 +26,10 @@ import ( "strings" "time" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" "github.com/google/go-containerregistry/pkg/v1/tarball" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/v1util" @@ -58,77 +59,14 @@ func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { if len(adds) == 0 { return base, nil } - if err := validate(adds); err != nil { return nil, err } - m, err := base.Manifest() - if err != nil { - return nil, err - } - - cf, err := base.ConfigFile() - if err != nil { - return nil, err - } - - image := &image{ - Image: base, - manifest: m.DeepCopy(), - configFile: cf.DeepCopy(), - diffIDMap: make(map[v1.Hash]v1.Layer), - digestMap: make(map[v1.Hash]v1.Layer), - } - - diffIDs := image.configFile.RootFS.DiffIDs - history := image.configFile.History - - for _, add := range adds { - diffID, err := add.Layer.DiffID() - if err != nil { - return nil, err - } - diffIDs = append(diffIDs, diffID) - history = append(history, add.History) - image.diffIDMap[diffID] = add.Layer - } - - manifestLayers := image.manifest.Layers - - for _, add := range adds { - d := v1.Descriptor{ - MediaType: types.DockerLayer, - } - - if d.Size, err = add.Layer.Size(); err != nil { - return nil, err - } - - if d.Digest, err = add.Layer.Digest(); err != nil { - return nil, err - } - - manifestLayers = 
append(manifestLayers, d) - image.digestMap[d.Digest] = add.Layer - } - - image.configFile.RootFS.DiffIDs = diffIDs - image.configFile.History = history - image.manifest.Layers = manifestLayers - - rcfg, err := image.RawConfigFile() - if err != nil { - return nil, err - } - d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) - if err != nil { - return nil, err - } - image.manifest.Config.Digest = d - image.manifest.Config.Size = sz - - return image, nil + return &image{ + base: base, + adds: adds, + }, nil } // Config mutates the provided v1.Image to have the provided v1.Config @@ -150,22 +88,11 @@ func configFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { } image := &image{ - Image: base, + base: base, manifest: m.DeepCopy(), configFile: cfg, - digestMap: make(map[v1.Hash]v1.Layer), } - rcfg, err := image.RawConfigFile() - if err != nil { - return nil, err - } - d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) - if err != nil { - return nil, err - } - image.manifest.Config.Digest = d - image.manifest.Config.Size = sz return image, nil } @@ -183,16 +110,118 @@ func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { } type image struct { - v1.Image + base v1.Image + adds []Addendum + + computed bool configFile *v1.ConfigFile manifest *v1.Manifest diffIDMap map[v1.Hash]v1.Layer digestMap map[v1.Hash]v1.Layer } +var _ v1.Image = (*image)(nil) + +func (i *image) MediaType() (types.MediaType, error) { return i.base.MediaType() } + +func (i *image) compute() error { + // Don't re-compute if already computed. 
+ if i.computed { + return nil + } + var configFile *v1.ConfigFile + if i.configFile != nil { + configFile = i.configFile + } else { + cf, err := i.base.ConfigFile() + if err != nil { + return err + } + configFile = cf.DeepCopy() + } + diffIDs := configFile.RootFS.DiffIDs + history := configFile.History + + diffIDMap := make(map[v1.Hash]v1.Layer) + digestMap := make(map[v1.Hash]v1.Layer) + + for _, add := range i.adds { + diffID, err := add.Layer.DiffID() + if err != nil { + return err + } + diffIDs = append(diffIDs, diffID) + history = append(history, add.History) + diffIDMap[diffID] = add.Layer + } + + m, err := i.base.Manifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifestLayers := manifest.Layers + for _, add := range i.adds { + d := v1.Descriptor{ + MediaType: types.DockerLayer, + } + + var err error + if d.Size, err = add.Layer.Size(); err != nil { + return err + } + + if d.Digest, err = add.Layer.Digest(); err != nil { + return err + } + + manifestLayers = append(manifestLayers, d) + digestMap[d.Digest] = add.Layer + } + + configFile.RootFS.DiffIDs = diffIDs + configFile.History = history + + manifest.Layers = manifestLayers + + rcfg, err := json.Marshal(configFile) + if err != nil { + return err + } + d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) + if err != nil { + return err + } + manifest.Config.Digest = d + manifest.Config.Size = sz + + i.configFile = configFile + i.manifest = manifest + i.diffIDMap = diffIDMap + i.digestMap = digestMap + i.computed = true + return nil +} + // Layers returns the ordered collection of filesystem layers that comprise this image. // The order of the list is oldest/base layer first, and most-recent/top layer last. func (i *image) Layers() ([]v1.Layer, error) { + if err := i.compute(); err == stream.ErrNotComputed { + // Image contains a streamable layer which has not yet been + // consumed. Just return the layers we have in case the caller + // is going to consume the layers. 
+ layers, err := i.base.Layers() + if err != nil { + return nil, err + } + for _, add := range i.adds { + layers = append(layers, add.Layer) + } + return layers, nil + } else if err != nil { + return nil, err + } + diffIDs, err := partial.DiffIDs(i) if err != nil { return nil, err @@ -208,38 +237,51 @@ func (i *image) Layers() ([]v1.Layer, error) { return ls, nil } -// BlobSet returns an unordered collection of all the blobs in the image. -func (i *image) BlobSet() (map[v1.Hash]struct{}, error) { - return partial.BlobSet(i) -} - // ConfigName returns the hash of the image's config file. func (i *image) ConfigName() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } return partial.ConfigName(i) } // ConfigFile returns this image's config file. func (i *image) ConfigFile() (*v1.ConfigFile, error) { + if err := i.compute(); err != nil { + return nil, err + } return i.configFile, nil } // RawConfigFile returns the serialized bytes of ConfigFile() func (i *image) RawConfigFile() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } return json.Marshal(i.configFile) } // Digest returns the sha256 of this image's manifest. func (i *image) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } return partial.Digest(i) } // Manifest returns this image's Manifest object. 
func (i *image) Manifest() (*v1.Manifest, error) { + if err := i.compute(); err != nil { + return nil, err + } return i.manifest, nil } // RawManifest returns the serialized bytes of Manifest() func (i *image) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } return json.Marshal(i.manifest) } @@ -254,7 +296,7 @@ func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { if layer, ok := i.digestMap[h]; ok { return layer, nil } - return i.Image.LayerByDigest(h) + return i.base.LayerByDigest(h) } // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" @@ -263,7 +305,7 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { if layer, ok := i.diffIDMap[h]; ok { return layer, nil } - return i.Image.LayerByDiffID(h) + return i.base.LayerByDiffID(h) } func validate(adds []Addendum) error { @@ -468,6 +510,10 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { } } + if err := tarWriter.Close(); err != nil { + return nil, err + } + b := w.Bytes() // gzip the contents, then create the layer opener := func() (io.ReadCloser, error) { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go index d6c8a7040..6d1fe8d51 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go @@ -17,10 +17,11 @@ package mutate import ( "fmt" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/empty" ) +// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { // Verify that oldBase's layers are present in orig, otherwise orig is // not based on oldBase at all. 
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go index 6c3998e52..497d1af0d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go @@ -17,7 +17,7 @@ package partial import ( "io" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -91,11 +91,6 @@ type compressedImageExtender struct { // Assert that our extender type completes the v1.Image interface var _ v1.Image = (*compressedImageExtender)(nil) -// BlobSet implements v1.Image -func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { - return BlobSet(i) -} - // Digest implements v1.Image func (i *compressedImageExtender) Digest() (v1.Hash, error) { return Digest(i) @@ -125,11 +120,6 @@ func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { // LayerByDigest implements v1.Image func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - if cfgName, err := i.ConfigName(); err != nil { - return nil, err - } else if cfgName == h { - return ConfigLayer(i) - } cl, err := i.CompressedImageCore.LayerByDigest(h) if err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go index f7055dcad..9f75723ec 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go @@ -19,7 +19,7 @@ import ( "io" "sync" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" 
"github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -112,11 +112,6 @@ type uncompressedImageExtender struct { // Assert that our extender type completes the v1.Image interface var _ v1.Image = (*uncompressedImageExtender)(nil) -// BlobSet implements v1.Image -func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { - return BlobSet(i) -} - // Digest implements v1.Image func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { return Digest(i) @@ -220,13 +215,6 @@ func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, err // LayerByDigest implements v1.Image func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - // Support returning the ConfigFile when asked for its hash. - if cfgName, err := i.ConfigName(); err != nil { - return nil, err - } else if cfgName == h { - return ConfigLayer(i) - } - diffID, err := BlobToDiffID(i, h) if err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go index bc6fd8e9f..f724ec8ab 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -19,8 +19,9 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -49,8 +50,6 @@ func ConfigName(i WithRawConfigFile) (v1.Hash, error) { return h, err } -// configLayer implements v1.Layer from the raw config bytes. -// This is so that clients (e.g. remote) can access the config as a blob. 
type configLayer struct { hash v1.Hash content []byte @@ -68,12 +67,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) { // Uncompressed implements v1.Layer func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil + return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil } // Compressed implements v1.Layer func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil + return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil } // Size implements v1.Layer @@ -83,6 +82,8 @@ func (cl *configLayer) Size() (int64, error) { var _ v1.Layer = (*configLayer)(nil) +// ConfigLayer implements v1.Layer from the raw config bytes. +// This is so that clients (e.g. remote) can access the config as a blob. func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { h, err := ConfigName(i) if err != nil { @@ -190,20 +191,6 @@ func FSLayers(i WithManifest) ([]v1.Hash, error) { return fsl, nil } -// BlobSet is a helper for implementing v1.Image -func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - bs := make(map[v1.Hash]struct{}) - for _, l := range m.Layers { - bs[l.Digest] = struct{}{} - } - bs[m.Config.Digest] = struct{}{} - return bs, nil -} - // BlobSize is a helper for implementing v1.Image func BlobSize(i WithManifest, h v1.Hash) (int64, error) { m, err := i.Manifest() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go index 2f9930fbd..cc269d6b5 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/random/image.go @@ -20,12 +20,12 @@ import ( "crypto/rand" "fmt" "io" + "io/ioutil" "time" - "github.com/google/go-containerregistry/pkg/v1" + v1 
"github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/types" - "github.com/google/go-containerregistry/pkg/v1/v1util" ) // uncompressedLayer implements partial.UncompressedLayer from raw bytes. @@ -42,7 +42,7 @@ func (ul *uncompressedLayer) DiffID() (v1.Hash, error) { // Uncompressed implements partial.UncompressedLayer func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) { - return v1util.NopReadCloser(bytes.NewBuffer(ul.content)), nil + return ioutil.NopCloser(bytes.NewBuffer(ul.content)), nil } var _ partial.UncompressedLayer = (*uncompressedLayer)(nil) @@ -54,14 +54,18 @@ func Image(byteSize, layers int64) (v1.Image, error) { var b bytes.Buffer tw := tar.NewWriter(&b) if err := tw.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("random_file_%d.txt", i), - Size: byteSize, + Name: fmt.Sprintf("random_file_%d.txt", i), + Size: byteSize, + Typeflag: tar.TypeRegA, }); err != nil { return nil, err } if _, err := io.CopyN(tw, rand.Reader, byteSize); err != nil { return nil, err } + if err := tw.Close(); err != nil { + return nil, err + } bts := b.Bytes() h, _, err := v1.SHA256(bytes.NewReader(bts)) if err != nil { @@ -75,6 +79,9 @@ func Image(byteSize, layers int64) (v1.Image, error) { cfg := &v1.ConfigFile{} + // Some clients check this. + cfg.RootFS.Type = "layers" + // It is ok that iteration order is random in Go, because this is the random image anyways. for k := range layerz { cfg.RootFS.DiffIDs = append(cfg.RootFS.DiffIDs, k) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go new file mode 100644 index 000000000..b6dacec5a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go @@ -0,0 +1,106 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package random + +import ( + "bytes" + "encoding/json" + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type randomIndex struct { + images map[v1.Hash]v1.Image + manifest *v1.IndexManifest +} + +// Index returns a pseudo-randomly generated ImageIndex with count images, each +// having the given number of layers of size byteSize. 
+func Index(byteSize, layers, count int64) (v1.ImageIndex, error) { + manifest := v1.IndexManifest{ + SchemaVersion: 2, + Manifests: []v1.Descriptor{}, + } + + images := make(map[v1.Hash]v1.Image) + for i := int64(0); i < count; i++ { + img, err := Image(byteSize, layers) + if err != nil { + return nil, err + } + + rawManifest, err := img.RawManifest() + if err != nil { + return nil, err + } + digest, size, err := v1.SHA256(bytes.NewReader(rawManifest)) + if err != nil { + return nil, err + } + mediaType, err := img.MediaType() + if err != nil { + return nil, err + } + + manifest.Manifests = append(manifest.Manifests, v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: mediaType, + }) + + images[digest] = img + } + + return &randomIndex{ + images: images, + manifest: &manifest, + }, nil +} + +func (i *randomIndex) MediaType() (types.MediaType, error) { + return types.OCIImageIndex, nil +} + +func (i *randomIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i *randomIndex) IndexManifest() (*v1.IndexManifest, error) { + return i.manifest, nil +} + +func (i *randomIndex) RawManifest() ([]byte, error) { + m, err := i.IndexManifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +func (i *randomIndex) Image(h v1.Hash) (v1.Image, error) { + if img, ok := i.images[h]; ok { + return img, nil + } + + return nil, fmt.Errorf("image not found: %v", h) +} + +func (i *randomIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + // This is a single level index (for now?). 
+ return nil, fmt.Errorf("image not found: %v", h) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go index 1c963ec82..37e25ad94 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -21,11 +21,12 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" "sync" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/types" @@ -34,14 +35,15 @@ import ( // remoteImage accesses an image from a remote registry type remoteImage struct { - ref name.Reference - client *http.Client + fetcher manifestLock sync.Mutex // Protects manifest manifest []byte configLock sync.Mutex // Protects config config []byte + mediaType types.MediaType } +// ImageOption is a functional option for Image. type ImageOption func(*imageOpener) error var _ partial.CompressedImageCore = (*remoteImage)(nil) @@ -59,8 +61,10 @@ func (i *imageOpener) Open() (v1.Image, error) { return nil, err } ri := &remoteImage{ - ref: i.ref, - client: &http.Client{Transport: tr}, + fetcher: fetcher{ + Ref: i.ref, + Client: &http.Client{Transport: tr}, + }, } imgCore, err := partial.CompressedToImage(ri) if err != nil { @@ -91,58 +95,57 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { return img.Open() } -func (r *remoteImage) url(resource, identifier string) url.URL { +// fetcher implements methods for reading from a remote image. 
+type fetcher struct { + Ref name.Reference + Client *http.Client +} + +// url returns a url.Url for the specified path in the context of this remote image reference. +func (f *fetcher) url(resource, identifier string) url.URL { return url.URL{ - Scheme: r.ref.Context().Registry.Scheme(), - Host: r.ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier), + Scheme: f.Ref.Context().Registry.Scheme(), + Host: f.Ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), } } -func (r *remoteImage) MediaType() (types.MediaType, error) { - // TODO(jonjohnsonjr): Determine this based on response. - return types.DockerManifestSchema2, nil -} - -// TODO(jonjohnsonjr): Handle manifest lists. -func (r *remoteImage) RawManifest() ([]byte, error) { - r.manifestLock.Lock() - defer r.manifestLock.Unlock() - if r.manifest != nil { - return r.manifest, nil - } - - u := r.url("manifests", r.ref.Identifier()) +func (f *fetcher) fetchManifest(acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { + u := f.url("manifests", f.Ref.Identifier()) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { - return nil, err + return nil, nil, err } - // TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index. 
- req.Header.Set("Accept", string(types.DockerManifestSchema2)) - resp, err := r.client.Do(req) + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.Client.Do(req) if err != nil { - return nil, err + return nil, nil, err } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK); err != nil { - return nil, err + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, nil, err } manifest, err := ioutil.ReadAll(resp.Body) if err != nil { - return nil, err + return nil, nil, err } - digest, _, err := v1.SHA256(bytes.NewReader(manifest)) + digest, size, err := v1.SHA256(bytes.NewReader(manifest)) if err != nil { - return nil, err + return nil, nil, err } // Validate the digest matches what we asked for, if pulling by digest. - if dgst, ok := r.ref.(name.Digest); ok { + if dgst, ok := f.Ref.(name.Digest); ok { if digest.String() != dgst.DigestStr() { - return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref) + return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) } } else { // Do nothing for tags; I give up. @@ -155,6 +158,42 @@ func (r *remoteImage) RawManifest() ([]byte, error) { // https://github.com/GoogleContainerTools/kaniko/issues/298 } + // Return all this info since we have to calculate it anyway. + desc := v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: types.MediaType(resp.Header.Get("Content-Type")), + } + + return manifest, &desc, nil +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestSchema2, nil +} + +// TODO(jonjohnsonjr): Handle manifest lists. 
+func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // TODO(jonjohnsonjr): Accept manifest list and image index? + acceptable := []types.MediaType{ + types.DockerManifestSchema2, + types.OCIManifestSchema1, + } + manifest, desc, err := r.fetchManifest(acceptable) + if err != nil { + return nil, err + } + + r.mediaType = desc.MediaType r.manifest = manifest return r.manifest, nil } @@ -202,12 +241,12 @@ func (rl *remoteLayer) Digest() (v1.Hash, error) { // Compressed implements partial.CompressedLayer func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { u := rl.ri.url("blobs", rl.digest.String()) - resp, err := rl.ri.client.Get(u.String()) + resp, err := rl.ri.Client.Get(u.String()) if err != nil { return nil, err } - if err := CheckError(resp, http.StatusOK); err != nil { + if err := transport.CheckError(resp, http.StatusOK); err != nil { resp.Body.Close() return nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go new file mode 100644 index 000000000..03afc481a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -0,0 +1,139 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "bytes" + "fmt" + "net/http" + "sync" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// remoteIndex accesses an index from a remote registry +type remoteIndex struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + mediaType types.MediaType +} + +// Index provides access to a remote index reference, applying functional options +// to the underlying imageOpener before resolving the reference into a v1.ImageIndex. +func Index(ref name.Reference, options ...ImageOption) (v1.ImageIndex, error) { + i := &imageOpener{ + auth: authn.Anonymous, + transport: http.DefaultTransport, + ref: ref, + } + + for _, option := range options { + if err := option(i); err != nil { + return nil, err + } + } + tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)}) + if err != nil { + return nil, err + } + return &remoteIndex{ + fetcher: fetcher{ + Ref: i.ref, + Client: &http.Client{Transport: tr}, + }, + }, nil +} + +func (r *remoteIndex) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestList, nil +} + +func (r *remoteIndex) Digest() (v1.Hash, error) { + return partial.Digest(r) +} + +func (r *remoteIndex) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + acceptable := []types.MediaType{ + types.DockerManifestList, + types.OCIImageIndex, + } + manifest, desc, err := r.fetchManifest(acceptable) + if err != nil { + return nil, err + } + + r.mediaType = desc.MediaType + r.manifest = manifest + return 
r.manifest, nil +} + +func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { + b, err := r.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseIndexManifest(bytes.NewReader(b)) +} + +func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { + imgRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) + if err != nil { + return nil, err + } + ri := &remoteImage{ + fetcher: fetcher{ + Ref: imgRef, + Client: r.Client, + }, + } + imgCore, err := partial.CompressedToImage(ri) + if err != nil { + return imgCore, err + } + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + return &mountableImage{ + Image: imgCore, + Reference: r.Ref, + }, nil +} + +func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + idxRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) + if err != nil { + return nil, err + } + return &remoteIndex{ + fetcher: fetcher{ + Ref: idxRef, + Client: r.Client, + }, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go index 17c00b5e7..1a36d0a4b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go @@ -25,12 +25,12 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) -type Tags struct { +type tags struct { Name string `json:"name"` Tags []string `json:"tags"` } -// TODO(jonjohnsonjr): return []name.Tag? +// List calls /tags/list for the given repository. 
func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) { scopes := []string{repo.Scope(transport.PullScope)} tr, err := transport.New(repo.Registry, auth, t, scopes) @@ -51,14 +51,14 @@ func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ( } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK); err != nil { + if err := transport.CheckError(resp, http.StatusOK); err != nil { return nil, err } - tags := Tags{} - if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + parsed := tags{} + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { return nil, err } - return tags.Tags, nil + return parsed.Tags, nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go index 13b79064d..3afda2a34 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -16,7 +16,7 @@ package remote import ( "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) // MountableLayer wraps a v1.Layer in a shim that enables the layer to be diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index 2bfdb6e24..f72ab276d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -15,9 +15,8 @@ package transport import ( - "fmt" - "encoding/json" + "fmt" "io/ioutil" "net/http" "net/url" @@ -40,6 +39,8 @@ type bearerTransport struct { // See https://docs.docker.com/registry/spec/auth/token/ service string scopes []string + // Scheme we should use, determined by 
ping response. + scheme string } var _ http.RoundTripper = (*bearerTransport)(nil) @@ -61,6 +62,8 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { in.Header.Set("Authorization", hdr) } in.Header.Set("User-Agent", transportName) + + in.URL.Scheme = bt.scheme return bt.inner.RoundTrip(in) } @@ -103,6 +106,10 @@ func (bt *bearerTransport) refresh() error { } defer resp.Body.Close() + if err := CheckError(resp, http.StatusOK); err != nil { + return err + } + content, err := ioutil.ReadAll(resp.Body) if err != nil { return err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go similarity index 90% rename from vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go rename to vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index 076274821..44885effa 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package remote +package transport import ( "encoding/json" @@ -35,7 +35,7 @@ var _ error = (*Error)(nil) func (e *Error) Error() string { switch len(e.Errors) { case 0: - return "" + return "" case 1: return e.Errors[0].String() default: @@ -55,9 +55,13 @@ type Diagnostic struct { Detail interface{} `json:"detail,omitempty"` } -// String stringifies the Diagnostic +// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] func (d Diagnostic) String() string { - return fmt.Sprintf("%s: %q", d.Code, d.Message) + msg := fmt.Sprintf("%s: %s", d.Code, d.Message) + if d.Detail != nil { + msg = fmt.Sprintf("%s; %v", msg, d.Detail) + } + return msg } // ErrorCode is an enumeration of supported error codes. 
@@ -83,6 +87,7 @@ const ( UnsupportedErrorCode ErrorCode = "UNSUPPORTED" ) +// CheckError returns a structured error if the response status is not in codes. func CheckError(resp *http.Response, codes ...int) error { for _, code := range codes { if resp.StatusCode == code { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go index 89133e326..cc0d2cfea 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -36,6 +36,9 @@ type pingResp struct { // Following the challenge there are often key/value pairs // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" parameters map[string]string + + // The registry's scheme to use. Communicates whether we fell back to http. + scheme string } func (c challenge) Canonical() challenge { @@ -63,31 +66,50 @@ func parseChallenge(suffix string) map[string]string { func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) { client := http.Client{Transport: t} - url := fmt.Sprintf("%s://%s/v2/", reg.Scheme(), reg.Name()) - resp, err := client.Get(url) - if err != nil { - return nil, err + // This first attempts to use "https" for every request, falling back to http + // if the registry matches our localhost heuristic or if it is intentionally + // set to insecure via name.NewInsecureRegistry. + schemes := []string{"https"} + if reg.Scheme() == "http" { + schemes = append(schemes, "http") } - defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. 
- return &pingResp{challenge: anonymous}, nil - case http.StatusUnauthorized: - wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) - if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { - // If there are two parts, then parse the challenge parameters. - return &pingResp{ - challenge: challenge(parts[0]).Canonical(), - parameters: parseChallenge(parts[1]), - }, nil + var connErr error + for _, scheme := range schemes { + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + resp, err := client.Get(url) + if err != nil { + connErr = err + // Potentially retry with http. + continue + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{ + challenge: anonymous, + scheme: scheme, + }, nil + case http.StatusUnauthorized: + wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) + if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { + // If there are two parts, then parse the challenge parameters. + return &pingResp{ + challenge: challenge(parts[0]).Canonical(), + parameters: parseChallenge(parts[1]), + scheme: scheme, + }, nil + } + // Otherwise, just return the challenge without parameters. + return &pingResp{ + challenge: challenge(wac).Canonical(), + scheme: scheme, + }, nil + default: + return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) } - // Otherwise, just return the challenge without parameters. 
- return &pingResp{ - challenge: challenge(wac).Canonical(), - }, nil - default: - return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) } + return nil, connErr } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go index 6140ab2ce..18c8e66c7 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -73,6 +73,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope registry: reg, service: service, scopes: scopes, + scheme: pr.scheme, } if err := bt.refresh(); err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 1fd633c0d..6e2c38a34 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -18,16 +18,27 @@ import ( "bytes" "errors" "fmt" + "io" "log" "net/http" "net/url" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" ) +type manifest interface { + RawManifest() ([]byte, error) + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) +} + // Write pushes the provided img to the specified image reference. 
func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error { ls, err := img.Layers() @@ -41,48 +52,74 @@ func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.Ro return err } w := writer{ - ref: ref, - client: &http.Client{Transport: tr}, - img: img, + ref: ref, + client: &http.Client{Transport: tr}, } - bs, err := img.BlobSet() - if err != nil { - return err - } - - // Spin up go routines to publish each of the members of BlobSet(), - // and use an error channel to collect their results. - errCh := make(chan error) - defer close(errCh) - for h := range bs { - go func(h v1.Hash) { - errCh <- w.uploadOne(h) - }(h) - } - - // Now wait for all of the blob uploads to complete. - var errors []error - for _ = range bs { - if err := <-errCh; err != nil { - errors = append(errors, err) + // Upload individual layers in goroutines and collect any errors. + // If we can dedupe by the layer digest, try to do so. If the layer is + // a stream.Layer, we can't dedupe and might re-upload. + var g errgroup.Group + uploaded := map[v1.Hash]bool{} + for _, l := range ls { + l := l + if _, ok := l.(*stream.Layer); !ok { + h, err := l.Digest() + if err != nil { + return err + } + // If we can determine the layer's digest ahead of + // time, use it to dedupe uploads. + if uploaded[h] { + continue // Already uploading. + } + uploaded[h] = true } + + g.Go(func() error { + return w.uploadOne(l) + }) } - if len(errors) > 0 { - // Return the first error we encountered. - return errors[0] + + if l, err := partial.ConfigLayer(img); err == stream.ErrNotComputed { + // We can't read the ConfigLayer, because of streaming layers, since the + // config hasn't been calculated yet. + if err := g.Wait(); err != nil { + return err + } + + // Now that all the layers are uploaded, upload the config file blob. 
+ l, err := partial.ConfigLayer(img) + if err != nil { + return err + } + if err := w.uploadOne(l); err != nil { + return err + } + } else if err != nil { + // This is an actual error, not a streaming error, just return it. + return err + } else { + // We *can* read the ConfigLayer, so upload it concurrently with the layers. + g.Go(func() error { + return w.uploadOne(l) + }) + + // Wait for the layers + config. + if err := g.Wait(); err != nil { + return err + } } // With all of the constituent elements uploaded, upload the manifest // to commit the image. - return w.commitImage() + return w.commitImage(img) } // writer writes the elements of an image to a remote image reference. type writer struct { - ref name.Reference - client *http.Client - img v1.Image + ref name.Reference + client *http.Client } // url returns a url.Url for the specified path in the context of this remote image reference. @@ -110,11 +147,11 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) { return resp.Request.URL.ResolveReference(u).String(), nil } -// checkExisting checks if a blob exists already in the repository by making a +// checkExistingBlob checks if a blob exists already in the repository by making a // HEAD request to the blob store API. GCR performs an existence check on the // initiation if "mount" is specified, even if no "from" sources are specified. // However, this is not broadly applicable to all registries, e.g. ECR. 
-func (w *writer) checkExisting(h v1.Hash) (bool, error) { +func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String())) resp, err := w.client.Head(u.String()) @@ -123,7 +160,31 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) { } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// checkExistingManifest checks if a manifest exists already in the repository +// by making a HEAD request to the manifest API. +func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + req.Header.Set("Accept", string(mt)) + + resp, err := w.client.Do(req) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { return false, err } @@ -136,20 +197,14 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) { // On success, the layer was either mounted (nothing more to do) or a blob // upload was initiated and the body of that blob should be sent to the returned // location. 
-func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err error) { +func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) - uv := url.Values{ - "mount": []string{h.String()}, + uv := url.Values{} + if mount != "" { + uv["mount"] = []string{mount} } - l, err := w.img.LayerByDigest(h) - if err != nil { - return "", false, err - } - - if ml, ok := l.(*MountableLayer); ok { - if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() { - uv["from"] = []string{ml.Reference.Context().RepositoryStr()} - } + if from != "" { + uv["from"] = []string{from} } u.RawQuery = uv.Encode() @@ -160,7 +215,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e } defer resp.Body.Close() - if err := CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { return "", false, err } @@ -181,15 +236,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e // streamBlob streams the contents of the blob to the specified location. // On failure, this will return an error. On success, this will return the location // header indicating how to commit the streamed blob. 
-func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation string, err error) { - l, err := w.img.LayerByDigest(h) - if err != nil { - return "", err - } - blob, err := l.Compressed() - if err != nil { - return "", err - } +func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) { defer blob.Close() req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) @@ -203,7 +250,7 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st } defer resp.Body.Close() - if err := CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { return "", err } @@ -212,14 +259,15 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st return w.nextLocation(resp) } -// commitBlob commits this blob by sending a PUT to the location returned from streaming the blob. -func (w *writer) commitBlob(h v1.Hash, location string) (err error) { +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(location, digest string) error { u, err := url.Parse(location) if err != nil { return err } v := u.Query() - v.Set("digest", h.String()) + v.Set("digest", digest) u.RawQuery = v.Encode() req, err := http.NewRequest(http.MethodPut, u.String(), nil) @@ -233,47 +281,82 @@ func (w *writer) commitBlob(h v1.Hash, location string) (err error) { } defer resp.Body.Close() - return CheckError(resp, http.StatusCreated) + return transport.CheckError(resp, http.StatusCreated) } // uploadOne performs a complete upload of a single layer. 
-func (w *writer) uploadOne(h v1.Hash) error { - existing, err := w.checkExisting(h) - if err != nil { - return err +func (w *writer) uploadOne(l v1.Layer) error { + var from, mount, digest string + if _, ok := l.(*stream.Layer); !ok { + // Layer isn't streamable, we should take advantage of that to + // skip uploading if possible. + // By sending ?digest= in the request, we'll also check that + // our computed digest matches the one computed by the + // registry. + h, err := l.Digest() + if err != nil { + return err + } + digest = h.String() + + existing, err := w.checkExistingBlob(h) + if err != nil { + return err + } + if existing { + log.Printf("existing blob: %v", h) + return nil + } + + mount = h.String() } - if existing { - log.Printf("existing blob: %v", h) - return nil + if ml, ok := l.(*MountableLayer); ok { + if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() { + from = ml.Reference.Context().RepositoryStr() + } } - location, mounted, err := w.initiateUpload(h) + location, mounted, err := w.initiateUpload(from, mount) if err != nil { return err } else if mounted { - log.Printf("mounted blob: %v", h) + h, err := l.Digest() + if err != nil { + return err + } + log.Printf("mounted blob: %s", h.String()) return nil } - location, err = w.streamBlob(h, location) + blob, err := l.Compressed() + if err != nil { + return err + } + location, err = w.streamBlob(blob, location) if err != nil { return err } - if err := w.commitBlob(h, location); err != nil { + h, err := l.Digest() + if err != nil { return err } - log.Printf("pushed blob %v", h) + digest = h.String() + + if err := w.commitBlob(location, digest); err != nil { + return err + } + log.Printf("pushed blob: %s", digest) return nil } // commitImage does a PUT of the image's manifest. 
-func (w *writer) commitImage() error { - raw, err := w.img.RawManifest() +func (w *writer) commitImage(man manifest) error { + raw, err := man.RawManifest() if err != nil { return err } - mt, err := w.img.MediaType() + mt, err := man.MediaType() if err != nil { return err } @@ -293,11 +376,11 @@ func (w *writer) commitImage() error { } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { return err } - digest, err := w.img.Digest() + digest, err := man.Digest() if err != nil { return err } @@ -324,11 +407,68 @@ func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string { // Push scope should be the first element because a few registries just look at the first scope to determine access. scopes = append(scopes, ref.Scope(transport.PushScope)) - for scope, _ := range scopeSet { + for scope := range scopeSet { scopes = append(scopes, scope) } return scopes } -// TODO(mattmoor): WriteIndex +// WriteIndex pushes the provided ImageIndex to the specified image reference. +// WriteIndex will attempt to push all of the referenced manifests before +// attempting to push the ImageIndex, to retain referential integrity. 
+func WriteIndex(ref name.Reference, ii v1.ImageIndex, auth authn.Authenticator, t http.RoundTripper) error { + index, err := ii.IndexManifest() + if err != nil { + return err + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + w := writer{ + ref: ref, + client: &http.Client{Transport: tr}, + } + + for _, desc := range index.Manifests { + ref, err := name.ParseReference(fmt.Sprintf("%s@%s", ref.Context(), desc.Digest), name.StrictValidation) + if err != nil { + return err + } + exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) + if err != nil { + return err + } + if exists { + log.Printf("existing manifest: %v", desc.Digest) + continue + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + ii, err := ii.ImageIndex(desc.Digest) + if err != nil { + return err + } + + if err := WriteIndex(ref, ii, auth, t); err != nil { + return err + } + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := ii.Image(desc.Digest) + if err != nil { + return err + } + if err := Write(ref, img, auth, t); err != nil { + return err + } + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitImage(ii) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go new file mode 100644 index 000000000..f8895a226 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -0,0 +1,194 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stream + +import ( + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +// Layer is a streaming implementation of v1.Layer. +type Layer struct { + blob io.ReadCloser + consumed bool + + mu sync.Mutex + digest, diffID *v1.Hash + size int64 +} + +var _ v1.Layer = (*Layer)(nil) + +// NewLayer creates a Layer from an io.ReadCloser. +func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} } + +// Digest implements v1.Layer. +func (l *Layer) Digest() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.digest == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.digest, nil +} + +// DiffID implements v1.Layer. +func (l *Layer) DiffID() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.diffID == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.diffID, nil +} + +// Size implements v1.Layer. +func (l *Layer) Size() (int64, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.size == 0 { + return 0, ErrNotComputed + } + return l.size, nil +} + +// Uncompressed implements v1.Layer. 
+func (l *Layer) Uncompressed() (io.ReadCloser, error) { + return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") +} + +// Compressed implements v1.Layer. +func (l *Layer) Compressed() (io.ReadCloser, error) { + if l.consumed { + return nil, ErrConsumed + } + return newCompressedReader(l) +} + +type compressedReader struct { + closer io.Closer // original blob's Closer. + + h, zh hash.Hash // collects digests of compressed and uncompressed stream. + pr io.Reader + count *countWriter + + l *Layer // stream.Layer to update upon Close. +} + +func newCompressedReader(l *Layer) (*compressedReader, error) { + h := sha256.New() + zh := sha256.New() + count := &countWriter{} + + // gzip.Writer writes to the output stream via pipe, a hasher to + // capture compressed digest, and a countWriter to capture compressed + // size. + pr, pw := io.Pipe() + zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed) + if err != nil { + return nil, err + } + + cr := &compressedReader{ + closer: newMultiCloser(zw, l.blob), + pr: pr, + h: h, + zh: zh, + count: count, + l: l, + } + go func() { + if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { + pw.CloseWithError(err) + return + } + // Now close the compressed reader, to flush the gzip stream + // and calculate digest/diffID/size. This will cause pr to + // return EOF which will cause readers of the Compressed stream + // to finish reading. + pw.CloseWithError(cr.Close()) + }() + + return cr, nil +} + +func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } + +func (cr *compressedReader) Close() error { + cr.l.mu.Lock() + defer cr.l.mu.Unlock() + + // Close the inner ReadCloser. 
+ if err := cr.closer.Close(); err != nil { + return err + } + + diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) + if err != nil { + return err + } + cr.l.diffID = &diffID + + digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) + if err != nil { + return err + } + cr.l.digest = &digest + + cr.l.size = cr.count.n + cr.l.consumed = true + return nil +} + +// countWriter counts bytes written to it. +type countWriter struct{ n int64 } + +func (c *countWriter) Write(p []byte) (int, error) { + c.n += int64(len(p)) + return len(p), nil +} + +// multiCloser is a Closer that collects multiple Closers and Closes them in order. +type multiCloser []io.Closer + +var _ io.Closer = (multiCloser)(nil) + +func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) } + +func (m multiCloser) Close() error { + for _, c := range m { + if err := c.Close(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go index 2a62327ce..ced18735c 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -26,7 +26,7 @@ import ( "sync" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/v1util" @@ -54,6 +54,7 @@ type compressedImage struct { var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) var _ partial.CompressedImageCore = (*compressedImage)(nil) +// Opener is a thunk for opening a tar file. 
type Opener func() (io.ReadCloser, error) func pathOpener(path string) Opener { @@ -62,6 +63,7 @@ func pathOpener(path string) Opener { } } +// ImageFromPath returns a v1.Image from a tarball located on path. func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { return Image(pathOpener(path), tag) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go index 6d43ff7d4..00256e8f2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -20,7 +20,7 @@ import ( "io/ioutil" "os" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go index b0d45061e..44dbe15aa 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -23,7 +23,7 @@ import ( "os" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) // WriteToFile writes in the compressed format to a tarball, on disk. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go deleted file mode 100644 index 8ff288d97..000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "io" -) - -func nop() error { - return nil -} - -// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &writeAndCloser{ - Writer: w, - CloseFunc: nop, - } -} - -// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing. -// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity. -func NopReadCloser(r io.Reader) io.ReadCloser { - return &readAndCloser{ - Reader: r, - CloseFunc: nop, - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go index 7ebb9dde9..c9699770c 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go @@ -20,7 +20,7 @@ import ( "hash" "io" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) type verifyReader struct { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go index f12d0ed88..2b0f24f6a 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go @@ -70,56 +70,14 @@ func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { }, nil } -// GzipWriteCloser returns an 
io.WriteCloser to which uncompressed data may be -// written, and the compressed data is then written to the provided -// io.WriteCloser. -func GzipWriteCloser(w io.WriteCloser) io.WriteCloser { - gw := gzip.NewWriter(w) - return &writeAndCloser{ - Writer: gw, - CloseFunc: func() error { - if err := gw.Close(); err != nil { - return err - } - return w.Close() - }, - } -} - -// gunzipWriteCloser implements io.WriteCloser -// It is used to implement GunzipWriteClose. -type gunzipWriteCloser struct { - *bytes.Buffer - writer io.WriteCloser -} - -// Close implements io.WriteCloser -func (gwc *gunzipWriteCloser) Close() error { - // TODO(mattmoor): How to avoid buffering this whole thing into memory? - gr, err := gzip.NewReader(gwc.Buffer) - if err != nil { - return err - } - if _, err := io.Copy(gwc.writer, gr); err != nil { - return err - } - return gwc.writer.Close() -} - -// GunzipWriteCloser returns an io.WriteCloser to which compressed data may be -// written, and the uncompressed data is then written to the provided -// io.WriteCloser. -func GunzipWriteCloser(w io.WriteCloser) (io.WriteCloser, error) { - return &gunzipWriteCloser{ - Buffer: bytes.NewBuffer(nil), - writer: w, - }, nil -} - // IsGzipped detects whether the input stream is compressed. 
func IsGzipped(r io.Reader) (bool, error) { magicHeader := make([]byte, 2) - if _, err := r.Read(magicHeader); err != nil { + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { return false, err } return bytes.Equal(magicHeader, gzipMagicHeader), nil diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index 2d05ac622..ed09f66f4 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "crypto/tls" + "errors" "fmt" "io" "net" @@ -178,21 +179,10 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { return &roundTripper{a, rt} } - getCert := c.TLS.GetCert - c.TLS.GetCert = func() (*tls.Certificate, error) { - // If previous GetCert is present and returns a valid non-nil - // certificate, use that. Otherwise use cert from exec plugin. 
- if getCert != nil { - cert, err := getCert() - if err != nil { - return nil, err - } - if cert != nil { - return cert, nil - } - } - return a.cert() + if c.TLS.GetCert != nil { + return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") } + c.TLS.GetCert = a.cert var dial func(ctx context.Context, network, addr string) (net.Conn, error) if c.Dial != nil { diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 459a93760..316a5c0d0 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -129,7 +129,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex } for key, values := range extra { for _, value := range values { - req.Header.Add("X-Remote-Extra-"+key, value) + req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value) } } } @@ -246,7 +246,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons } for k, vv := range rt.impersonate.Extra { for _, v := range vv { - req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v) + req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v) } } @@ -422,3 +422,110 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegatedRoundTripper } + +func legalHeaderByte(b byte) bool { + return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b] +} + +func shouldEscape(b byte) bool { + // url.PathUnescape() returns an error if any '%' is not followed by two + // hexadecimal digits, so we'll intentionally encode it. 
+ return !legalHeaderByte(b) || b == '%' +} + +func headerKeyEscape(key string) string { + buf := strings.Builder{} + for i := 0; i < len(key); i++ { + b := key[i] + if shouldEscape(b) { + // %-encode bytes that should be escaped: + // https://tools.ietf.org/html/rfc3986#section-2.1 + fmt.Fprintf(&buf, "%%%02X", b) + continue + } + buf.WriteByte(b) + } + return buf.String() +} + +// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable. +// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators +var legalHeaderKeyBytes = [127]bool{ + '%': true, + '!': true, + '#': true, + '$': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} From 9912ccbf8d22bbafbf971124600fbb0b13b9cbd6 Mon Sep 17 00:00:00 2001 From: dlorenc Date: Thu, 7 Mar 2019 07:05:24 -0800 Subject: [PATCH 07/22] Fix USER handling. There were two issues: (#600) - We were validating usernames/groupnames existed in etc/passwd. Docker does not do this - We were incorrectly caching USER commands. This was fixed automatically by fixing the first part. 
--- integration/dockerfiles/Dockerfile_test_user | 17 +++++++++++++++++ pkg/commands/user.go | 9 --------- pkg/commands/user_test.go | 19 ++----------------- 3 files changed, 19 insertions(+), 26 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_user diff --git a/integration/dockerfiles/Dockerfile_test_user b/integration/dockerfiles/Dockerfile_test_user new file mode 100644 index 000000000..51896647f --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_user @@ -0,0 +1,17 @@ +# Copyright 2018 Google, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0 +USER testuser:testgroup + diff --git a/pkg/commands/user.go b/pkg/commands/user.go index c024f38f0..a8cfb2774 100644 --- a/pkg/commands/user.go +++ b/pkg/commands/user.go @@ -31,10 +31,6 @@ type UserCommand struct { cmd *instructions.UserCommand } -func (r *UserCommand) RequiresUnpackedFS() bool { - return true -} - func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: USER") u := r.cmd.User @@ -52,11 +48,6 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu } } - _, _, err = util.GetUserFromUsername(userStr, groupStr) - if err != nil { - return err - } - if groupStr != "" { userStr = userStr + ":" + groupStr } diff --git a/pkg/commands/user_test.go b/pkg/commands/user_test.go index 6770d127b..343a7bed4 100644 --- a/pkg/commands/user_test.go +++ b/pkg/commands/user_test.go @@ -28,57 +28,42 @@ import ( var userTests = []struct { user string expectedUID string - shouldError bool }{ { user: "root", expectedUID: "root", - shouldError: false, }, { user: "0", expectedUID: "0", - shouldError: false, }, { user: "fakeUser", - expectedUID: "", - shouldError: true, + expectedUID: "fakeUser", }, { user: "root:root", expectedUID: "root:root", - shouldError: false, }, { user: "0:root", expectedUID: "0:root", - shouldError: false, }, { user: "root:0", expectedUID: "root:0", - shouldError: false, }, { user: "0:0", expectedUID: "0:0", - shouldError: false, - }, - { - user: "root:fakeGroup", - expectedUID: "", - shouldError: true, }, { user: "$envuser", expectedUID: "root", - shouldError: false, }, { user: "root:$envgroup", expectedUID: "root:root", - shouldError: false, }, } @@ -97,6 +82,6 @@ func TestUpdateUser(t *testing.T) { } buildArgs := dockerfile.NewBuildArgs([]string{}) err := cmd.ExecuteCommand(cfg, buildArgs) - testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, 
test.expectedUID, cfg.User) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedUID, cfg.User) } } From 6ce3dfb93a5d2998fa7768ebd9ba61334b5b7230 Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Mon, 11 Mar 2019 21:44:32 +0100 Subject: [PATCH 08/22] Bump go-containerregistry to 8c1640add99804503b4126abc718931a4d93c31a (#609) The main reason is to include the fixes from https://github.com/google/go-containerregistry/pull/401. This should fix the build+push to quay.io (with v2 schema enabled) cases. Signed-off-by: Vincent Demeester --- Gopkg.lock | 4 +- Gopkg.toml | 2 +- .../pkg/v1/empty/index.go | 2 +- .../pkg/v1/remote/image.go | 54 ++++++++++++++++++- .../pkg/v1/remote/options.go | 8 +++ .../pkg/v1/remote/write.go | 5 +- 6 files changed, 67 insertions(+), 8 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 7d5486b54..c15fd937e 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -430,7 +430,7 @@ version = "v0.2.0" [[projects]] - digest = "1:a4f41b57b6a09cf498024fd9d2872b99c32bfc1462a8f34ac625e88531d52930" + digest = "1:3edac9d0a5f7e0e636f85bd7d3105df6180af528ab7e6a88f00b1ae6fc0bf947" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -450,7 +450,7 @@ "pkg/v1/v1util", ] pruneopts = "NUT" - revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457" + revision = "8c1640add99804503b4126abc718931a4d93c31a" [[projects]] digest = "1:f4f203acd8b11b8747bdcd91696a01dbc95ccb9e2ca2db6abf81c3a4f5e950ce" diff --git a/Gopkg.toml b/Gopkg.toml index 2ab371521..e39d19a01 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -37,7 +37,7 @@ required = [ [[constraint]] name = "github.com/google/go-containerregistry" - revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457" + revision = "8c1640add99804503b4126abc718931a4d93c31a" [[override]] name = "k8s.io/apimachinery" diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go index 83dc58ab7..a03d758fd 100644 --- 
a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -18,7 +18,7 @@ import ( "encoding/json" "errors" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/types" ) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go index 37e25ad94..1be0ad2ea 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -33,6 +33,11 @@ import ( "github.com/google/go-containerregistry/pkg/v1/v1util" ) +var defaultPlatform = v1.Platform{ + Architecture: "amd64", + OS: "linux", +} + // remoteImage accesses an image from a remote registry type remoteImage struct { fetcher @@ -41,6 +46,7 @@ type remoteImage struct { configLock sync.Mutex // Protects config config []byte mediaType types.MediaType + platform v1.Platform } // ImageOption is a functional option for Image. @@ -53,6 +59,7 @@ type imageOpener struct { transport http.RoundTripper ref name.Reference client *http.Client + platform v1.Platform } func (i *imageOpener) Open() (v1.Image, error) { @@ -65,6 +72,7 @@ func (i *imageOpener) Open() (v1.Image, error) { Ref: i.ref, Client: &http.Client{Transport: tr}, }, + platform: i.platform, } imgCore, err := partial.CompressedToImage(ri) if err != nil { @@ -85,6 +93,7 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { auth: authn.Anonymous, transport: http.DefaultTransport, ref: ref, + platform: defaultPlatform, } for _, option := range options { @@ -183,16 +192,26 @@ func (r *remoteImage) RawManifest() ([]byte, error) { return r.manifest, nil } - // TODO(jonjohnsonjr): Accept manifest list and image index? 
acceptable := []types.MediaType{ types.DockerManifestSchema2, types.OCIManifestSchema1, + // We'll resolve these to an image based on the platform. + types.DockerManifestList, + types.OCIImageIndex, } manifest, desc, err := r.fetchManifest(acceptable) if err != nil { return nil, err } + // We want an image but the registry has an index, resolve it to an image. + for desc.MediaType == types.DockerManifestList || desc.MediaType == types.OCIImageIndex { + manifest, desc, err = r.matchImage(manifest) + if err != nil { + return nil, err + } + } + r.mediaType = desc.MediaType r.manifest = manifest return r.manifest, nil @@ -283,3 +302,36 @@ func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) digest: h, }, nil } + +// This naively matches the first manifest with matching Architecture and OS. +// +// We should probably use this instead: +// github.com/containerd/containerd/platforms +// +// But first we'd need to migrate to: +// github.com/opencontainers/image-spec/specs-go/v1 +func (r *remoteImage) matchImage(rawIndex []byte) ([]byte, *v1.Descriptor, error) { + index, err := v1.ParseIndexManifest(bytes.NewReader(rawIndex)) + if err != nil { + return nil, nil, err + } + for _, childDesc := range index.Manifests { + // If platform is missing from child descriptor, assume it's amd64/linux. 
+ p := defaultPlatform + if childDesc.Platform != nil { + p = *childDesc.Platform + } + if r.platform.Architecture == p.Architecture && r.platform.OS == p.OS { + childRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), childDesc.Digest), name.StrictValidation) + if err != nil { + return nil, nil, err + } + r.fetcher = fetcher{ + Client: r.Client, + Ref: childRef, + } + return r.fetchManifest([]types.MediaType{childDesc.MediaType}) + } + } + return nil, nil, fmt.Errorf("no matching image for %s/%s, index: %s", r.platform.Architecture, r.platform.OS, string(rawIndex)) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index a6e9584ee..335e3fe5b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -19,6 +19,7 @@ import ( "net/http" "github.com/google/go-containerregistry/pkg/authn" + v1 "github.com/google/go-containerregistry/pkg/v1" ) // WithTransport is a functional option for overriding the default transport @@ -54,3 +55,10 @@ func WithAuthFromKeychain(keys authn.Keychain) ImageOption { return nil } } + +func WithPlatform(p v1.Platform) ImageOption { + return func(i *imageOpener) error { + i.platform = p + return nil + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 6e2c38a34..66f148155 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -200,10 +200,9 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) 
uv := url.Values{} - if mount != "" { + if mount != "" && from != "" { + // Quay will fail if we specify a "mount" without a "from". uv["mount"] = []string{mount} - } - if from != "" { uv["from"] = []string{from} } u.RawQuery = uv.Encode() From 246cc92a33c29d76b811f7da605375d8bc4c2198 Mon Sep 17 00:00:00 2001 From: dlorenc Date: Wed, 13 Mar 2019 07:47:28 -0700 Subject: [PATCH 09/22] Optimize file copying and stage saving between stages. (#605) This change calculates the exact files and directories needed between stages used in the COPY command. Instead of saving the entire stage as a tarball, we now save only the necessary files. --- Gopkg.lock | 9 + pkg/commands/add.go | 4 +- pkg/commands/copy.go | 16 +- pkg/config/stage.go | 1 + pkg/dockerfile/dockerfile.go | 12 +- pkg/dockerfile/dockerfile_test.go | 2 +- pkg/executor/build.go | 104 ++++++++- pkg/executor/build_test.go | 209 +++++++++++++++++- pkg/executor/foo | 0 pkg/util/command_util.go | 45 ++-- pkg/util/command_util_test.go | 1 - vendor/github.com/otiai10/copy/LICENSE | 21 ++ vendor/github.com/otiai10/copy/copy.go | 93 ++++++++ .../otiai10/copy/testdata/case03/case01 | 1 + 14 files changed, 469 insertions(+), 49 deletions(-) create mode 100644 pkg/executor/foo create mode 100644 vendor/github.com/otiai10/copy/LICENSE create mode 100644 vendor/github.com/otiai10/copy/copy.go create mode 120000 vendor/github.com/otiai10/copy/testdata/case03/case01 diff --git a/Gopkg.lock b/Gopkg.lock index c15fd937e..e1a1c09c1 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -680,6 +680,14 @@ revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" version = "v1.0.2" +[[projects]] + branch = "master" + digest = "1:15057fc7395024283a7d2639b8afc61c5b6df3fe260ce06ff5834c8464f16b5c" + name = "github.com/otiai10/copy" + packages = ["."] + pruneopts = "NUT" + revision = "7e9a647135a142c2669943d4a4d29be015ce9392" + [[projects]] branch = "master" digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" @@ -1204,6 +1212,7 
@@ "github.com/moby/buildkit/frontend/dockerfile/instructions", "github.com/moby/buildkit/frontend/dockerfile/parser", "github.com/moby/buildkit/frontend/dockerfile/shell", + "github.com/otiai10/copy", "github.com/pkg/errors", "github.com/sirupsen/logrus", "github.com/spf13/cobra", diff --git a/pkg/commands/add.go b/pkg/commands/add.go index b66b56db2..72f97653c 100644 --- a/pkg/commands/add.go +++ b/pkg/commands/add.go @@ -47,7 +47,7 @@ type AddCommand struct { func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - srcs, dest, err := resolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs) + srcs, dest, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs) if err != nil { return err } @@ -114,7 +114,7 @@ func (a *AddCommand) String() string { func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) { replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - srcs, _, err := resolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs) + srcs, _, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs) if err != nil { return nil, err } diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index 46e7f08aa..9af5ce3fd 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -45,7 +45,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - srcs, dest, err := resolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) + srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) if err != nil { return err } @@ -100,18 +100,6 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu return nil } -func resolveEnvAndWildcards(sd 
instructions.SourcesAndDest, buildcontext string, envs []string) ([]string, string, error) { - // First, resolve any environment replacement - resolvedEnvs, err := util.ResolveEnvironmentReplacementList(sd, envs, true) - if err != nil { - return nil, "", err - } - dest := resolvedEnvs[len(resolvedEnvs)-1] - // Resolve wildcards and get a list of resolved sources - srcs, err := util.ResolveSources(resolvedEnvs, buildcontext) - return srcs, dest, err -} - // FilesToSnapshot should return an empty array if still nil; no files were changed func (c *CopyCommand) FilesToSnapshot() []string { return c.snapshotFiles @@ -129,7 +117,7 @@ func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerf } replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - srcs, _, err := resolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) + srcs, _, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) if err != nil { return nil, err } diff --git a/pkg/config/stage.go b/pkg/config/stage.go index 2cdfaad15..56c4a3f0f 100644 --- a/pkg/config/stage.go +++ b/pkg/config/stage.go @@ -26,4 +26,5 @@ type KanikoStage struct { BaseImageStoredLocally bool SaveStage bool MetaArgs []instructions.ArgCommand + Index int } diff --git a/pkg/dockerfile/dockerfile.go b/pkg/dockerfile/dockerfile.go index c7625f458..331219159 100644 --- a/pkg/dockerfile/dockerfile.go +++ b/pkg/dockerfile/dockerfile.go @@ -25,6 +25,8 @@ import ( "strconv" "strings" + "github.com/sirupsen/logrus" + "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -67,6 +69,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { return nil, errors.Wrap(err, "resolving base name") } stage.Name = resolvedBaseName + logrus.Infof("Resolved base name %s to %s", stage.BaseName, stage.Name) kanikoStages = append(kanikoStages, 
config.KanikoStage{ Stage: stage, BaseImageIndex: baseImageIndex(index, stages), @@ -74,6 +77,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { SaveStage: saveStage(index, stages), Final: index == targetStage, MetaArgs: metaArgs, + Index: index, }) if index == targetStage { break @@ -175,14 +179,6 @@ func saveStage(index int, stages []instructions.Stage) bool { return true } } - for _, cmd := range stage.Commands { - switch c := cmd.(type) { - case *instructions.CopyCommand: - if c.From == strconv.Itoa(index) { - return true - } - } - } } return false } diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go index 829a59b7f..1fa68890c 100644 --- a/pkg/dockerfile/dockerfile_test.go +++ b/pkg/dockerfile/dockerfile_test.go @@ -114,7 +114,7 @@ func Test_SaveStage(t *testing.T) { { name: "reference stage in later copy command", index: 0, - expected: true, + expected: false, }, { name: "reference stage in later from command", diff --git a/pkg/executor/build.go b/pkg/executor/build.go index 5286c035c..7b6ecf14c 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -23,6 +23,8 @@ import ( "strconv" "time" + "github.com/otiai10/copy" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -60,10 +62,11 @@ type stageBuilder struct { opts *config.KanikoOptions cmds []commands.DockerCommand args *dockerfile.BuildArgs + crossStageDeps map[int][]string } // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage -func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) { +func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string) (*stageBuilder, error) { sourceImage, err := util.RetrieveSourceImage(stage, opts) if err != nil { return nil, err @@ -96,6 +99,7 @@ func newStageBuilder(opts *config.KanikoOptions, 
stage config.KanikoStage) (*sta snapshotter: snapshotter, baseImageDigest: digest.String(), opts: opts, + crossStageDeps: crossStageDeps, } for _, cmd := range s.stage.Commands { @@ -207,6 +211,10 @@ func (s *stageBuilder) build() error { break } } + if len(s.crossStageDeps[s.stage.Index]) > 0 { + shouldUnpack = true + } + if shouldUnpack { t := timing.Start("FS Unpacking") if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil { @@ -353,6 +361,63 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) err } +func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) { + stages, err := dockerfile.Stages(opts) + if err != nil { + return nil, err + } + images := []v1.Image{} + depGraph := map[int][]string{} + for _, s := range stages { + ba := dockerfile.NewBuildArgs(opts.BuildArgs) + ba.AddMetaArgs(s.MetaArgs) + var image v1.Image + var err error + if s.BaseImageStoredLocally { + image = images[s.BaseImageIndex] + } else if s.Name == constants.NoBaseImage { + image = empty.Image + } else { + image, err = util.RetrieveSourceImage(s, opts) + if err != nil { + return nil, err + } + } + initializeConfig(image) + cfg, err := image.ConfigFile() + if err != nil { + return nil, err + } + for _, c := range s.Commands { + switch cmd := c.(type) { + case *instructions.CopyCommand: + if cmd.From != "" { + i, err := strconv.Atoi(cmd.From) + if err != nil { + continue + } + resolved, err := util.ResolveEnvironmentReplacementList(cmd.SourcesAndDest, cfg.Config.Env, true) + if err != nil { + return nil, err + } + + depGraph[i] = append(depGraph[i], resolved[0:len(resolved)-1]...) 
+ } + case *instructions.EnvCommand: + if err := util.UpdateConfigEnv(cmd.Env, &cfg.Config, ba.ReplacementEnvs(cfg.Config.Env)); err != nil { + return nil, err + } + image, err = mutate.Config(image, cfg.Config) + if err != nil { + return nil, err + } + } + } + images = append(images, image) + } + return depGraph, nil +} + // DoBuild executes building the Dockerfile func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { t := timing.Start("Total Build Time") @@ -369,8 +434,14 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { return nil, err } + crossStageDependencies, err := CalculateDependencies(opts) + if err != nil { + return nil, err + } + logrus.Infof("Built cross stage deps: %v", crossStageDependencies) + for index, stage := range stages { - sb, err := newStageBuilder(opts, stage) + sb, err := newStageBuilder(opts, stage, crossStageDependencies) if err != nil { return nil, err } @@ -405,10 +476,21 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { if err := saveStageAsTarball(strconv.Itoa(index), sourceImage); err != nil { return nil, err } - if err := extractImageToDependecyDir(strconv.Itoa(index), sourceImage); err != nil { - return nil, err - } } + + filesToSave, err := filesToSave(crossStageDependencies[index]) + if err != nil { + return nil, err + } + dstDir := filepath.Join(constants.KanikoDir, strconv.Itoa(index)) + if err := os.MkdirAll(dstDir, 0644); err != nil { + return nil, err + } + for _, p := range filesToSave { + logrus.Infof("Saving file %s for later use.", p) + copy.Copy(p, filepath.Join(dstDir, p)) + } + // Delete the filesystem if err := util.DeleteFilesystem(); err != nil { return nil, err @@ -418,6 +500,18 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { return nil, err } +func filesToSave(deps []string) ([]string, error) { + allFiles := []string{} + for _, src := range deps { + srcs, err := filepath.Glob(src) + if err != nil { + return nil, err + } + allFiles = append(allFiles, srcs...) 
+ } + return allFiles, nil +} + func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error { t := timing.Start("Fetching Extra Stages") defer timing.DefaultRun.Stop(t) diff --git a/pkg/executor/build_test.go b/pkg/executor/build_test.go index 71cf89b12..b0cd34d0f 100644 --- a/pkg/executor/build_test.go +++ b/pkg/executor/build_test.go @@ -17,14 +17,19 @@ limitations under the License. package executor import ( + "io/ioutil" + "os" + "path/filepath" + "reflect" + "sort" "testing" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-cmp/cmp" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/moby/buildkit/frontend/dockerfile/instructions" ) func Test_reviewConfig(t *testing.T) { @@ -180,3 +185,201 @@ func Test_stageBuilder_shouldTakeSnapshot(t *testing.T) { }) } } + +func TestCalculateDependencies(t *testing.T) { + type args struct { + dockerfile string + } + tests := []struct { + name string + args args + want map[int][]string + }{ + { + name: "no deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +RUN foo +FROM stage1 +RUN bar +`, + }, + want: map[int][]string{}, + }, + { + name: "simple deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +FROM alpine +COPY --from=stage1 /foo /bar +`, + }, + want: map[int][]string{ + 0: {"/foo"}, + }, + }, + { + name: "two sets deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +FROM ubuntu as stage2 +RUN foo +COPY --from=stage1 /foo /bar +FROM alpine +COPY --from=stage2 /bar /bat +`, + }, + want: map[int][]string{ + 0: {"/foo"}, + 1: {"/bar"}, + }, + }, + { + name: "double deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +FROM ubuntu as stage2 +RUN foo +COPY --from=stage1 /foo /bar +FROM alpine +COPY --from=stage1 
/baz /bat +`, + }, + want: map[int][]string{ + 0: {"/foo", "/baz"}, + }, + }, + { + name: "envs in deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +FROM ubuntu as stage2 +RUN foo +ENV key1 val1 +ENV key2 val2 +COPY --from=stage1 /foo/$key1 /foo/$key2 /bar +FROM alpine +COPY --from=stage2 /bar /bat +`, + }, + want: map[int][]string{ + 0: {"/foo/val1", "/foo/val2"}, + 1: {"/bar"}, + }, + }, + { + name: "envs from base image in deps", + args: args{ + dockerfile: ` +FROM debian as stage1 +ENV key1 baseval1 +FROM stage1 as stage2 +RUN foo +ENV key2 val2 +COPY --from=stage1 /foo/$key1 /foo/$key2 /bar +FROM alpine +COPY --from=stage2 /bar /bat +`, + }, + want: map[int][]string{ + 0: {"/foo/baseval1", "/foo/val2"}, + 1: {"/bar"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f, _ := ioutil.TempFile("", "") + ioutil.WriteFile(f.Name(), []byte(tt.args.dockerfile), 0755) + opts := &config.KanikoOptions{ + DockerfilePath: f.Name(), + } + + if got, _ := CalculateDependencies(opts); !reflect.DeepEqual(got, tt.want) { + diff := cmp.Diff(got, tt.want) + t.Errorf("CalculateDependencies() = %v, want %v, diff %v", got, tt.want, diff) + } + }) + } +} + +func Test_filesToSave(t *testing.T) { + tests := []struct { + name string + args []string + want []string + files []string + }{ + { + name: "simple", + args: []string{"foo"}, + files: []string{"foo"}, + want: []string{"foo"}, + }, + { + name: "glob", + args: []string{"foo*"}, + files: []string{"foo", "foo2", "fooooo", "bar"}, + want: []string{"foo", "foo2", "fooooo"}, + }, + { + name: "complex glob", + args: []string{"foo*", "bar?"}, + files: []string{"foo", "foo2", "fooooo", "bar", "bar1", "bar2", "bar33"}, + want: []string{"foo", "foo2", "fooooo", "bar1", "bar2"}, + }, + { + name: "dir", + args: []string{"foo"}, + files: []string{"foo/bar", "foo/baz", "foo/bat/baz"}, + want: []string{"foo"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpDir, err := 
ioutil.TempDir("", "") + if err != nil { + t.Errorf("error creating tmpdir: %s", err) + } + defer os.RemoveAll(tmpDir) + + for _, f := range tt.files { + p := filepath.Join(tmpDir, f) + dir := filepath.Dir(p) + if dir != "." { + if err := os.MkdirAll(dir, 0755); err != nil { + t.Errorf("error making dir: %s", err) + } + } + fp, err := os.Create(p) + if err != nil { + t.Errorf("error making file: %s", err) + } + fp.Close() + } + + args := []string{} + for _, arg := range tt.args { + args = append(args, filepath.Join(tmpDir, arg)) + } + got, err := filesToSave(args) + if err != nil { + t.Errorf("got err: %s", err) + } + want := []string{} + for _, w := range tt.want { + want = append(want, filepath.Join(tmpDir, w)) + } + sort.Strings(want) + sort.Strings(got) + if !reflect.DeepEqual(got, want) { + t.Errorf("filesToSave() = %v, want %v", got, want) + } + }) + } +} diff --git a/pkg/executor/foo b/pkg/executor/foo new file mode 100644 index 000000000..e69de29bb diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index e64fadf55..a38972ced 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -78,6 +78,22 @@ func ResolveEnvironmentReplacement(value string, envs []string, isFilepath bool) return fp, nil } +func ResolveEnvAndWildcards(sd instructions.SourcesAndDest, buildcontext string, envs []string) ([]string, string, error) { + // First, resolve any environment replacement + resolvedEnvs, err := ResolveEnvironmentReplacementList(sd, envs, true) + if err != nil { + return nil, "", err + } + dest := resolvedEnvs[len(resolvedEnvs)-1] + // Resolve wildcards and get a list of resolved sources + srcs, err := ResolveSources(resolvedEnvs[0:len(resolvedEnvs)-1], buildcontext) + if err != nil { + return nil, "", err + } + err = IsSrcsValid(sd, srcs, buildcontext) + return srcs, dest, err +} + // ContainsWildcards returns true if any entry in paths contains wildcards func ContainsWildcards(paths []string) bool { for _, path := range paths { @@ -90,23 
+106,22 @@ func ContainsWildcards(paths []string) bool { // ResolveSources resolves the given sources if the sources contains wildcards // It returns a list of resolved sources -func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) ([]string, error) { - srcs := srcsAndDest[:len(srcsAndDest)-1] +func ResolveSources(srcs []string, root string) ([]string, error) { // If sources contain wildcards, we first need to resolve them to actual paths - if ContainsWildcards(srcs) { - logrus.Debugf("Resolving srcs %v...", srcs) - files, err := RelativeFiles("", root) - if err != nil { - return nil, err - } - srcs, err = matchSources(srcs, files) - if err != nil { - return nil, err - } - logrus.Debugf("Resolved sources to %v", srcs) + if !ContainsWildcards(srcs) { + return srcs, nil } - // Check to make sure the sources are valid - return srcs, IsSrcsValid(srcsAndDest, srcs, root) + logrus.Infof("Resolving srcs %v...", srcs) + files, err := RelativeFiles("", root) + if err != nil { + return nil, err + } + resolved, err := matchSources(srcs, files) + if err != nil { + return nil, err + } + logrus.Debugf("Resolved sources to %v", resolved) + return resolved, nil } // matchSources returns a list of sources that match wildcards diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go index f7a4bf211..3e2342595 100644 --- a/pkg/util/command_util_test.go +++ b/pkg/util/command_util_test.go @@ -408,7 +408,6 @@ var testResolveSources = []struct { "context/foo", "context/b*", testURL, - "dest/", }, expectedList: []string{ "context/foo", diff --git a/vendor/github.com/otiai10/copy/LICENSE b/vendor/github.com/otiai10/copy/LICENSE new file mode 100644 index 000000000..1f0cc5dec --- /dev/null +++ b/vendor/github.com/otiai10/copy/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2018 otiai10 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), 
to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/otiai10/copy/copy.go b/vendor/github.com/otiai10/copy/copy.go new file mode 100644 index 000000000..9e0b09162 --- /dev/null +++ b/vendor/github.com/otiai10/copy/copy.go @@ -0,0 +1,93 @@ +package copy + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Copy copies src to dest, doesn't matter if src is a directory or a file +func Copy(src, dest string) error { + info, err := os.Lstat(src) + if err != nil { + return err + } + return copy(src, dest, info) +} + +// copy dispatches copy-funcs according to the mode. +// Because this "copy" could be called recursively, +// "info" MUST be given here, NOT nil. +func copy(src, dest string, info os.FileInfo) error { + if info.Mode()&os.ModeSymlink != 0 { + return lcopy(src, dest, info) + } + if info.IsDir() { + return dcopy(src, dest, info) + } + return fcopy(src, dest, info) +} + +// fcopy is for just a file, +// with considering existence of parent directory +// and file permission. 
+func fcopy(src, dest string, info os.FileInfo) error { + + if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil { + return err + } + + f, err := os.Create(dest) + if err != nil { + return err + } + defer f.Close() + + if err = os.Chmod(f.Name(), info.Mode()); err != nil { + return err + } + + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + _, err = io.Copy(f, s) + return err +} + +// dcopy is for a directory, +// with scanning contents inside the directory +// and pass everything to "copy" recursively. +func dcopy(srcdir, destdir string, info os.FileInfo) error { + + if err := os.MkdirAll(destdir, info.Mode()); err != nil { + return err + } + + contents, err := ioutil.ReadDir(srcdir) + if err != nil { + return err + } + + for _, content := range contents { + cs, cd := filepath.Join(srcdir, content.Name()), filepath.Join(destdir, content.Name()) + if err := copy(cs, cd, content); err != nil { + // If any error, exit immediately + return err + } + } + return nil +} + +// lcopy is for a symlink, +// with just creating a new symlink by replicating src symlink. 
+func lcopy(src, dest string, info os.FileInfo) error { + src, err := os.Readlink(src) + if err != nil { + return err + } + return os.Symlink(src, dest) +} diff --git a/vendor/github.com/otiai10/copy/testdata/case03/case01 b/vendor/github.com/otiai10/copy/testdata/case03/case01 new file mode 120000 index 000000000..091feb4af --- /dev/null +++ b/vendor/github.com/otiai10/copy/testdata/case03/case01 @@ -0,0 +1 @@ +./testdata/case01 \ No newline at end of file From 28bfb75a3156bf785958f816b51e3bf4e365327a Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Tue, 19 Mar 2019 03:28:24 +0900 Subject: [PATCH 10/22] Fix file mode bug (#618) * Fix file mode * Add Dockerfile for special file permission test --- .../dockerfiles/Dockerfile_test_cache_perm | 8 ++++ integration/images.go | 1 + pkg/util/fs_util.go | 47 ++++++++++--------- pkg/util/fs_util_test.go | 24 ++++++++++ 4 files changed, 59 insertions(+), 21 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_cache_perm diff --git a/integration/dockerfiles/Dockerfile_test_cache_perm b/integration/dockerfiles/Dockerfile_test_cache_perm new file mode 100644 index 000000000..06c311fe6 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_cache_perm @@ -0,0 +1,8 @@ +# Test to make sure the cache works with special file permissions properly. +# If the image is built twice, directory foo should have the sticky bit, +# and file bar should have the setuid and setgid bits. 
+ +FROM busybox + +RUN mkdir foo && chmod +t foo +RUN touch bar && chmod u+s,g+s bar diff --git a/integration/images.go b/integration/images.go index 27d54501c..2eb00be60 100644 --- a/integration/images.go +++ b/integration/images.go @@ -134,6 +134,7 @@ func NewDockerFileBuilder(dockerfiles []string) *DockerFileBuilder { d.TestCacheDockerfiles = map[string]struct{}{ "Dockerfile_test_cache": {}, "Dockerfile_test_cache_install": {}, + "Dockerfile_test_cache_perm": {}, } return &d } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 38b25bc51..5ef48845f 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -216,27 +216,16 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error { if err != nil { return err } - // manually set permissions on file, since the default umask (022) will interfere - if err = os.Chmod(path, mode); err != nil { - return err - } if _, err = io.Copy(currFile, tr); err != nil { return err } - if err = currFile.Chown(uid, gid); err != nil { + if err = setFilePermissions(path, mode, uid, gid); err != nil { return err } currFile.Close() case tar.TypeDir: logrus.Debugf("creating dir %s", path) - if err := os.MkdirAll(path, mode); err != nil { - return err - } - // In some cases, MkdirAll doesn't change the permissions, so run Chmod - if err := os.Chmod(path, mode); err != nil { - return err - } - if err := os.Chown(path, uid, gid); err != nil { + if err := mkdirAllWithPermissions(path, mode, uid, gid); err != nil { return err } @@ -429,10 +418,7 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid if _, err := io.Copy(dest, reader); err != nil { return err } - if err := dest.Chmod(perm); err != nil { - return err - } - return dest.Chown(int(uid), int(gid)) + return setFilePermissions(path, perm, int(uid), int(gid)) } // AddVolumePath adds the given path to the volume whitelist. 
@@ -492,13 +478,11 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) { if fi.IsDir() { logrus.Debugf("Creating directory %s", destPath) + mode := fi.Mode() uid := int(fi.Sys().(*syscall.Stat_t).Uid) gid := int(fi.Sys().(*syscall.Stat_t).Gid) - if err := os.MkdirAll(destPath, fi.Mode()); err != nil { - return nil, err - } - if err := os.Chown(destPath, uid, gid); err != nil { + if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil { return nil, err } } else if fi.Mode()&os.ModeSymlink != 0 { @@ -614,3 +598,24 @@ func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool { func Volumes() []string { return volumes } + +func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int) error { + if err := os.MkdirAll(path, mode); err != nil { + return err + } + if err := os.Chown(path, uid, gid); err != nil { + return err + } + // In some cases, MkdirAll doesn't change the permissions, so run Chmod + // Must chmod after chown because chown resets the file mode. + return os.Chmod(path, mode) +} + +func setFilePermissions(path string, mode os.FileMode, uid, gid int) error { + if err := os.Chown(path, uid, gid); err != nil { + return err + } + // manually set permissions on file, since the default umask (022) will interfere + // Must chmod after chown because chown resets the file mode. 
+ return os.Chmod(path, mode) +} diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 4f91343f8..eff39d5d9 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -503,6 +503,30 @@ func TestExtractFile(t *testing.T) { filesAreHardlinks("/bin/uncompress", "/bin/gzip"), }, }, + { + name: "file with setuid bit", + contents: []byte("helloworld"), + hdrs: []*tar.Header{fileHeader("./bar", "helloworld", 04644)}, + checkers: []checker{ + fileExists("/bar"), + fileMatches("/bar", []byte("helloworld")), + permissionsMatch("/bar", 0644|os.ModeSetuid), + }, + }, + { + name: "dir with sticky bit", + contents: []byte("helloworld"), + hdrs: []*tar.Header{ + dirHeader("./foo", 01755), + fileHeader("./foo/bar", "helloworld", 0644), + }, + checkers: []checker{ + fileExists("/foo/bar"), + fileMatches("/foo/bar", []byte("helloworld")), + permissionsMatch("/foo/bar", 0644), + permissionsMatch("/foo", 0755|os.ModeDir|os.ModeSticky), + }, + }, } for _, tc := range tcs { From 3fa411ceb9a2b8fa13e8d4e4d8adbb5ce363b2ed Mon Sep 17 00:00:00 2001 From: Jason Hall Date: Tue, 19 Mar 2019 13:39:59 -0400 Subject: [PATCH 11/22] Check push permissions before building images (#622) * Check push permissions before building images * Fix doc comment * improve error messages --- Gopkg.lock | 4 +- Gopkg.toml | 2 +- cmd/executor/cmd/root.go | 6 +- pkg/executor/push.go | 24 ++++++++ .../go-containerregistry/pkg/v1/manifest.go | 2 +- .../pkg/v1/remote/check.go | 56 +++++++++++++++++++ .../pkg/v1/tarball/write.go | 51 +++++++++++++---- 7 files changed, 128 insertions(+), 17 deletions(-) create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go diff --git a/Gopkg.lock b/Gopkg.lock index e1a1c09c1..e9c254cdb 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -430,7 +430,7 @@ version = "v0.2.0" [[projects]] - digest = "1:3edac9d0a5f7e0e636f85bd7d3105df6180af528ab7e6a88f00b1ae6fc0bf947" + digest = 
"1:d40a26f0daf07f3b5c916356a3e10fabbf97d5166f77e57aa3983013ab57004c" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -450,7 +450,7 @@ "pkg/v1/v1util", ] pruneopts = "NUT" - revision = "8c1640add99804503b4126abc718931a4d93c31a" + revision = "8621d738a07bc74b2adeafd175a3c738423577a0" [[projects]] digest = "1:f4f203acd8b11b8747bdcd91696a01dbc95ccb9e2ca2db6abf81c3a4f5e950ce" diff --git a/Gopkg.toml b/Gopkg.toml index e39d19a01..1cc672456 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -37,7 +37,7 @@ required = [ [[constraint]] name = "github.com/google/go-containerregistry" - revision = "8c1640add99804503b4126abc718931a4d93c31a" + revision = "8621d738a07bc74b2adeafd175a3c738423577a0" [[override]] name = "k8s.io/apimachinery" diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index f8a1c14b4..e51a03ae3 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -24,12 +24,11 @@ import ( "strings" "time" - "github.com/GoogleContainerTools/kaniko/pkg/timing" - "github.com/GoogleContainerTools/kaniko/pkg/buildcontext" "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/executor" + "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/genuinetools/amicontained/container" "github.com/pkg/errors" @@ -79,6 +78,9 @@ var RootCmd = &cobra.Command{ } logrus.Warn("kaniko is being run outside of a container. 
This can have dangerous effects on your system") } + if err := executor.CheckPushPermissions(opts); err != nil { + exit(errors.Wrap(err, "error checking push permissions -- make sure you entered the correct tag name, and that you are authenticated correctly, and try again")) + } if err := os.Chdir("/"); err != nil { exit(errors.Wrap(err, "error changing to root dir")) } diff --git a/pkg/executor/push.go b/pkg/executor/push.go index 0ee021f83..579aabc61 100644 --- a/pkg/executor/push.go +++ b/pkg/executor/push.go @@ -47,6 +47,30 @@ func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) { return w.t.RoundTrip(r) } +// CheckPushPermissions checks that the configured credentials can be used to +// push to every specified destination. +func CheckPushPermissions(opts *config.KanikoOptions) error { + if opts.NoPush { + return nil + } + + checked := map[string]bool{} + for _, destination := range opts.Destinations { + destRef, err := name.NewTag(destination, name.WeakValidation) + if err != nil { + return errors.Wrap(err, "getting tag for destination") + } + if checked[destRef.Context().RepositoryStr()] { + continue + } + if err := remote.CheckPushPermission(destRef, creds.GetKeychain(), http.DefaultTransport); err != nil { + return errors.Wrapf(err, "checking push permission for %q", destRef) + } + checked[destRef.Context().RepositoryStr()] = true + } + return nil +} + // DoPush is responsible for pushing image to the destinations specified in opts func DoPush(image v1.Image, opts *config.KanikoOptions) error { t := timing.Start("Total Push Time") diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go index 932ae056a..36c341df8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -23,7 +23,7 @@ import ( // Manifest represents the OCI image manifest in a structured 
way. type Manifest struct { - SchemaVersion int64 `json:"schemaVersion"` + SchemaVersion int64 `json:"schemaVersion,omitempty"` MediaType types.MediaType `json:"mediaType"` Config Descriptor `json:"config"` Layers []Descriptor `json:"layers"` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go new file mode 100644 index 000000000..aa574eb8b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -0,0 +1,56 @@ +package remote + +import ( + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// CheckPushPermission returns an error if the given keychain cannot authorize +// a push operation to the given ref. +// +// This can be useful to check whether the caller has permission to push an +// image before doing work to construct the image. +// +// TODO(#412): Remove the need for this method. +func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { + auth, err := kc.Resolve(ref.Context().Registry) + if err != nil { + return err + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + // TODO(jasonhall): Against GCR, just doing the token handshake is + // enough, but this doesn't extend to Dockerhub + // (https://github.com/docker/hub-feedback/issues/1771), so we actually + // need to initiate an upload to tell whether the credentials can + // authorize a push. Figure out how to return early here when we can, + // to avoid a roundtrip for spec-compliant registries. 
+ w := writer{ + ref: ref, + client: &http.Client{Transport: tr}, + } + loc, _, err := w.initiateUpload("", "") + if loc != "" { + // Since we're only initiating the upload to check whether we + // can, we should attempt to cancel it, in case initiating + // reserves some resources on the server. We shouldn't wait for + // cancelling to complete, and we don't care if it fails. + go w.cancelUpload(loc) + } + return err +} + +func (w *writer) cancelUpload(loc string) { + req, err := http.NewRequest(http.MethodDelete, loc, nil) + if err != nil { + return + } + _, _ = w.client.Do(req) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go index 44dbe15aa..2ee81f0b8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -28,31 +28,41 @@ import ( // WriteToFile writes in the compressed format to a tarball, on disk. // This is just syntactic sugar wrapping tarball.Write with a new file. -func WriteToFile(p string, tag name.Tag, img v1.Image) error { +func WriteToFile(p string, ref name.Reference, img v1.Image) error { w, err := os.Create(p) if err != nil { return err } defer w.Close() - return Write(tag, img, w) + return Write(ref, img, w) } // MultiWriteToFile writes in the compressed format to a tarball, on disk. // This is just syntactic sugar wrapping tarball.MultiWrite with a new file. func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image) error { + var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWriteToFile(p, refToImage) +} + +// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. 
+func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image) error { w, err := os.Create(p) if err != nil { return err } defer w.Close() - return MultiWrite(tagToImage, w) + return MultiRefWrite(refToImage, w) } // Write is a wrapper to write a single image and tag to a tarball. -func Write(tag name.Tag, img v1.Image, w io.Writer) error { - return MultiWrite(map[name.Tag]v1.Image{tag: img}, w) +func Write(ref name.Reference, img v1.Image, w io.Writer) error { + return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w) } // MultiWrite writes the contents of each image to the provided reader, in the compressed format. @@ -61,10 +71,23 @@ func Write(tag name.Tag, img v1.Image, w io.Writer) error { // One file for each layer, named after the layer's SHA. // One file for the config blob, named after its SHA. func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error { + var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWrite(refToImage, w) +} + +// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. 
+func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error { tf := tar.NewWriter(w) defer tf.Close() - imageToTags := dedupTagToImage(tagToImage) + imageToTags := dedupRefToImage(refToImage) var td tarDescriptor for img, tags := range imageToTags { @@ -135,14 +158,20 @@ func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error { return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes))) } -func dedupTagToImage(tagToImage map[name.Tag]v1.Image) map[v1.Image][]string { +func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { imageToTags := make(map[v1.Image][]string) - for tag, img := range tagToImage { - if tags, ok := imageToTags[img]; ok { - imageToTags[img] = append(tags, tag.String()) + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; ok && tags != nil { + imageToTags[img] = append(tags, tag.String()) + } else { + imageToTags[img] = []string{tag.String()} + } } else { - imageToTags[img] = []string{tag.String()} + if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } } } From 1bf4421047570790ced358f60118b58709628c15 Mon Sep 17 00:00:00 2001 From: Daisuke Taniwaki Date: Wed, 20 Mar 2019 02:40:15 +0900 Subject: [PATCH 12/22] Fix parent directory permissions (#619) * Add parent directories of adding files * Add integration Dockerfile to test parent directory permissions * Remove unnecessary helper method * Use a file on the internet for integration Dockerfile --- .../Dockerfile_test_parent_dir_perms | 8 ++ pkg/snapshot/layered_map.go | 10 +-- pkg/snapshot/snapshot.go | 77 ++++++++++--------- pkg/snapshot/snapshot_test.go | 21 ++++- 4 files changed, 70 insertions(+), 46 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_parent_dir_perms diff --git a/integration/dockerfiles/Dockerfile_test_parent_dir_perms b/integration/dockerfiles/Dockerfile_test_parent_dir_perms new file mode 100644 index 
000000000..e0514e40e --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_parent_dir_perms @@ -0,0 +1,8 @@ +FROM busybox + +RUN adduser --disabled-password --gecos "" --uid 1000 user +RUN mkdir -p /home/user/foo +RUN chown -R user /home/user +RUN chmod 700 /home/user/foo +ADD https://raw.githubusercontent.com/GoogleContainerTools/kaniko/master/README.md /home/user/foo/README.md +RUN chown -R user /home/user diff --git a/pkg/snapshot/layered_map.go b/pkg/snapshot/layered_map.go index 2ec822ae7..ad2bee4d2 100644 --- a/pkg/snapshot/layered_map.go +++ b/pkg/snapshot/layered_map.go @@ -109,11 +109,10 @@ func (l *LayeredMap) Add(s string) error { return nil } -// MaybeAdd will add the specified file s to the layered map if -// the layered map's hashing function determines it has changed. If -// it has not changed, it will not be added. Returns true if the file -// was added. -func (l *LayeredMap) MaybeAdd(s string) (bool, error) { +// CheckFileChange checks whether a given file changed +// from the current layered map by its hashing function. +// Returns true if the file is changed. 
+func (l *LayeredMap) CheckFileChange(s string) (bool, error) { oldV, ok := l.Get(s) t := timing.Start("Hashing files") defer timing.DefaultRun.Stop(t) @@ -124,6 +123,5 @@ func (l *LayeredMap) MaybeAdd(s string) (bool, error) { if ok && newV == oldV { return false, nil } - l.layers[len(l.layers)-1][s] = newV return true, nil } diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index bf71c506b..d87378b62 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -73,44 +73,15 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { } logrus.Info("Taking snapshot of files...") logrus.Debugf("Taking snapshot of files %v", files) - snapshottedFiles := make(map[string]bool) - // First add to the tar any parent directories that haven't been added - parentDirs := map[string]struct{}{} - for _, file := range files { - for _, p := range util.ParentDirectories(file) { - parentDirs[p] = struct{}{} - } - } - filesToAdd := []string{} - for file := range parentDirs { - file = filepath.Clean(file) - snapshottedFiles[file] = true - - // The parent directory might already be in a previous layer. - fileAdded, err := s.l.MaybeAdd(file) - if err != nil { - return "", fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err) - } - - if fileAdded { - filesToAdd = append(filesToAdd, file) - } - } - - // Next add the files themselves to the tar - for _, file := range files { - // We might have already added the file above as a parent directory of another file. - file = filepath.Clean(file) - if _, ok := snapshottedFiles[file]; ok { - continue - } - snapshottedFiles[file] = true + // Also add parent directories to keep the permission of them correctly. 
+ filesToAdd := filesWithParentDirs(files) + // Add files to the layered map + for _, file := range filesToAdd { if err := s.l.Add(file); err != nil { return "", fmt.Errorf("Unable to add file %s to layered map: %s", file, err) } - filesToAdd = append(filesToAdd, file) } t := util.NewTar(f) @@ -201,16 +172,27 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { logrus.Debugf("Not adding %s to layer, as it's whitelisted", path) continue } - // Only add to the tar if we add it to the layeredmap. - maybeAdd, err := s.l.MaybeAdd(path) + // Only add changed files. + fileChanged, err := s.l.CheckFileChange(path) if err != nil { return nil, nil, err } - if maybeAdd { - logrus.Debugf("Adding %s to layer, because it was changed.", path) + if fileChanged { + logrus.Infof("Adding %s to layer, because it was changed.", path) filesToAdd = append(filesToAdd, path) } } + + // Also add parent directories to keep the permission of them correctly. + filesToAdd = filesWithParentDirs(filesToAdd) + + // Add files to the layered map + for _, file := range filesToAdd { + if err := s.l.Add(file); err != nil { + return nil, nil, fmt.Errorf("Unable to add file %s to layered map: %s", file, err) + } + } + return filesToAdd, filesToWhiteOut, nil } @@ -230,3 +212,24 @@ func writeToTar(t util.Tar, files, whiteouts []string) error { } return nil } + +func filesWithParentDirs(files []string) []string { + filesSet := map[string]bool{} + + for _, file := range files { + file = filepath.Clean(file) + filesSet[file] = true + + for _, dir := range util.ParentDirectories(file) { + dir = filepath.Clean(dir) + filesSet[dir] = true + } + } + + newFiles := []string{} + for file := range filesSet { + newFiles = append(newFiles, file) + } + + return newFiles +} diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 11c63c791..ea6f4bceb 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -21,6 +21,7 @@ import ( "io/ioutil" "os" 
"path/filepath" + "sort" "testing" "github.com/GoogleContainerTools/kaniko/pkg/util" @@ -60,6 +61,12 @@ func TestSnapshotFSFileChange(t *testing.T) { fooPath: "newbaz1", batPath: "baz", } + for _, dir := range util.ParentDirectories(fooPath) { + snapshotFiles[dir] = "" + } + for _, dir := range util.ParentDirectories(batPath) { + snapshotFiles[dir] = "" + } numFiles := 0 for { hdr, err := tr.Next() @@ -75,7 +82,7 @@ func TestSnapshotFSFileChange(t *testing.T) { t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, snapshotFiles[hdr.Name], string(contents)) } } - if numFiles != 2 { + if numFiles != len(snapshotFiles) { t.Fatalf("Incorrect number of files were added, expected: 2, actual: %v", numFiles) } } @@ -105,6 +112,9 @@ func TestSnapshotFSChangePermissions(t *testing.T) { snapshotFiles := map[string]string{ batPath: "baz2", } + for _, dir := range util.ParentDirectories(batPath) { + snapshotFiles[dir] = "" + } numFiles := 0 for { hdr, err := tr.Next() @@ -120,7 +130,7 @@ func TestSnapshotFSChangePermissions(t *testing.T) { t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, snapshotFiles[hdr.Name], string(contents)) } } - if numFiles != 1 { + if numFiles != len(snapshotFiles) { t.Fatalf("Incorrect number of files were added, expected: 1, got: %v", numFiles) } } @@ -147,7 +157,10 @@ func TestSnapshotFiles(t *testing.T) { } defer os.Remove(tarPath) - expectedFiles := []string{"/", "/tmp", filepath.Join(testDir, "foo")} + expectedFiles := []string{ + filepath.Join(testDir, "foo"), + } + expectedFiles = append(expectedFiles, util.ParentDirectories(filepath.Join(testDir, "foo"))...) 
f, err := os.Open(tarPath) if err != nil { @@ -166,6 +179,8 @@ func TestSnapshotFiles(t *testing.T) { } actualFiles = append(actualFiles, hdr.Name) } + sort.Strings(expectedFiles) + sort.Strings(actualFiles) testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles) } From c8fabdf6e43b19f6a223f1d0b06e127d0774bd7e Mon Sep 17 00:00:00 2001 From: dlorenc Date: Fri, 22 Mar 2019 12:24:43 -0500 Subject: [PATCH 13/22] Fix arg handling for multi-stage images in COPY instructions. (#621) --- .../dockerfiles/Dockerfile_test_arg_multi | 12 +++++++++ pkg/commands/arg.go | 26 ++++++++++++------- pkg/executor/build.go | 11 +++++--- pkg/executor/build_test.go | 24 ++++++++++++++++- 4 files changed, 60 insertions(+), 13 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_arg_multi diff --git a/integration/dockerfiles/Dockerfile_test_arg_multi b/integration/dockerfiles/Dockerfile_test_arg_multi new file mode 100644 index 000000000..a81e47e50 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_arg_multi @@ -0,0 +1,12 @@ +ARG FILE_NAME=myFile + +FROM busybox:latest AS builder +ARG FILE_NAME + +RUN echo $FILE_NAME && touch /$FILE_NAME.txt && stat /$FILE_NAME.txt; + +FROM busybox:latest +ARG FILE_NAME + +RUN echo $FILE_NAME && touch /$FILE_NAME.txt && stat /$FILE_NAME.txt; +COPY --from=builder /$FILE_NAME.txt / \ No newline at end of file diff --git a/pkg/commands/arg.go b/pkg/commands/arg.go index 6b890cff0..e17767338 100644 --- a/pkg/commands/arg.go +++ b/pkg/commands/arg.go @@ -30,27 +30,35 @@ type ArgCommand struct { // ExecuteCommand only needs to add this ARG key/value as seen func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { - replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - resolvedKey, err := util.ResolveEnvironmentReplacement(r.cmd.Key, replacementEnvs, false) + key, val, err := ParseArg(r.cmd.Key, r.cmd.Value, config.Env, buildArgs) if err != nil { return err } + + 
buildArgs.AddArg(key, val) + return nil +} + +func ParseArg(key string, val *string, env []string, ba *dockerfile.BuildArgs) (string, *string, error) { + replacementEnvs := ba.ReplacementEnvs(env) + resolvedKey, err := util.ResolveEnvironmentReplacement(key, replacementEnvs, false) + if err != nil { + return "", nil, err + } var resolvedValue *string - if r.cmd.Value != nil { - value, err := util.ResolveEnvironmentReplacement(*r.cmd.Value, replacementEnvs, false) + if val != nil { + value, err := util.ResolveEnvironmentReplacement(*val, replacementEnvs, false) if err != nil { - return err + return "", nil, err } resolvedValue = &value } else { - meta := buildArgs.GetAllMeta() + meta := ba.GetAllMeta() if value, ok := meta[resolvedKey]; ok { resolvedValue = &value } } - - buildArgs.AddArg(resolvedKey, resolvedValue) - return nil + return resolvedKey, resolvedValue, nil } // String returns some information about the command for the image config history diff --git a/pkg/executor/build.go b/pkg/executor/build.go index 7b6ecf14c..2fbf592ce 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -383,8 +383,7 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) return nil, err } } - initializeConfig(image) - cfg, err := image.ConfigFile() + cfg, err := initializeConfig(image) if err != nil { return nil, err } @@ -396,7 +395,7 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) if err != nil { continue } - resolved, err := util.ResolveEnvironmentReplacementList(cmd.SourcesAndDest, cfg.Config.Env, true) + resolved, err := util.ResolveEnvironmentReplacementList(cmd.SourcesAndDest, ba.ReplacementEnvs(cfg.Config.Env), true) if err != nil { return nil, err } @@ -411,6 +410,12 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) if err != nil { return nil, err } + case *instructions.ArgCommand: + k, v, err := commands.ParseArg(cmd.Key, cmd.Value, cfg.Config.Env, ba) + if err 
!= nil { + return nil, err + } + ba.AddArg(k, v) } } images = append(images, image) diff --git a/pkg/executor/build_test.go b/pkg/executor/build_test.go index b0cd34d0f..c48e4bb26 100644 --- a/pkg/executor/build_test.go +++ b/pkg/executor/build_test.go @@ -207,6 +207,23 @@ RUN bar }, want: map[int][]string{}, }, + { + name: "args", + args: args{ + dockerfile: ` +ARG myFile=foo +FROM debian as stage1 +RUN foo +FROM stage1 +ARG myFile +COPY --from=stage1 /tmp/$myFile.txt . +RUN bar +`, + }, + want: map[int][]string{ + 0: {"/tmp/foo.txt"}, + }, + }, { name: "simple deps", args: args{ @@ -300,7 +317,12 @@ COPY --from=stage2 /bar /bat DockerfilePath: f.Name(), } - if got, _ := CalculateDependencies(opts); !reflect.DeepEqual(got, tt.want) { + got, err := CalculateDependencies(opts) + if err != nil { + t.Errorf("got error: %s,", err) + } + + if !reflect.DeepEqual(got, tt.want) { diff := cmp.Diff(got, tt.want) t.Errorf("CalculateDependencies() = %v, want %v, diff %v", got, tt.want, diff) } From dd9d08144780edf4dfcbf2bf4d9b9c21f8afcdd8 Mon Sep 17 00:00:00 2001 From: Dirk Gustke Date: Mon, 15 Apr 2019 22:22:46 +0200 Subject: [PATCH 14/22] this is quite spammy in my multistage build (#640) .. and as i am surely not the only one, move it down to debug. 
--- pkg/snapshot/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index d87378b62..90638168d 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -178,7 +178,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { return nil, nil, err } if fileChanged { - logrus.Infof("Adding %s to layer, because it was changed.", path) + logrus.Debugf("Adding %s to layer, because it was changed.", path) filesToAdd = append(filesToAdd, path) } } From 841cfb3f53951fe24b8da82216ce278947d560f2 Mon Sep 17 00:00:00 2001 From: Johan Hernandez Date: Mon, 15 Apr 2019 15:32:44 -0500 Subject: [PATCH 15/22] Add documentation for --verbosity flag (#634) --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index f6071e551..de087882a 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME - [--skip-tls-verify-pull](#--skip-tls-verify-pull) - [--target](#--target) - [--tarPath](#--tarpath) + - [--verbosity](#--verbosity) - [Debug Image](#debug-image) - [Security](#security) - [Comparison with Other Tools](#comparison-with-other-tools) @@ -411,6 +412,10 @@ Set this flag to indicate which build stage is the target build stage. Set this flag as `--tarPath=` to save the image as a tarball at path instead of pushing the image. +#### --verbosity + +Set this flag as `--verbosity=` to set the logging level. Defaults to `info`. + ### Debug Image The kaniko executor image is based off of scratch and doesn't contain a shell. 
From 7901c76127bec751f16afc5ed6ce24d5db3fef1c Mon Sep 17 00:00:00 2001 From: Balint Pato Date: Tue, 16 Apr 2019 15:53:20 -0700 Subject: [PATCH 16/22] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index de087882a..6efe487a6 100644 --- a/README.md +++ b/README.md @@ -358,7 +358,7 @@ _This flag must be used in conjunction with the `--cache=true` flag._ #### --insecure-registry -Set this flag to use plain HTTP requests when accessing a registry. It is supposed to be useed for testing purposes only and should not be used in production! +Set this flag to use plain HTTP requests when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production! You can set it multiple times for multiple registries. #### --skip-tls-verify-registry From 404af20f7c041264282a4ca09b79660da6f9d616 Mon Sep 17 00:00:00 2001 From: Akihiro Suda Date: Fri, 26 Apr 2019 02:41:38 +0900 Subject: [PATCH 17/22] README.md: update BuildKit/img comparison (#642) Latest BuildKit/img no longer necessarily requires procMount to be unmasked, by not unsharing PID namespaces. The current drawback of BuildKit/img compared to kaniko is that BuildKit/img requires seccomp and AppArmor to be disabled so as to create nested containers. 
https://github.com/moby/buildkit/pull/768 https://github.com/genuinetools/img/pull/221 Signed-off-by: Akihiro Suda --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6efe487a6..823b0712b 100644 --- a/README.md +++ b/README.md @@ -450,6 +450,7 @@ You may be able to achieve the same default seccomp profile that Docker uses in Similar tools include: +- [BuildKit](https://github.com/moby/buildkit) - [img](https://github.com/genuinetools/img) - [orca-build](https://github.com/cyphar/orca-build) - [umoci](https://github.com/openSUSE/umoci) @@ -459,10 +460,10 @@ Similar tools include: All of these tools build container images with different approaches. -`img` can perform as a non root user from within a container, but requires that -the `img` container has `RawProc` access to create nested containers. `kaniko` -does not actually create nested containers, so it does not require `RawProc` -access. +BuildKit (and `img`) can perform as a non root user from within a container, but requires +seccomp and AppArmor to be disabled to create nested containers. `kaniko` +does not actually create nested containers, so it does not require seccomp and AppArmor +to be disabled. `orca-build` depends on `runc` to build images from Dockerfiles, which can not run inside a container (for similar reasons to `img` above). `kaniko` doesn't From a6e3ddfc7976b399644d076f7d9bcfbe5d1f7fb9 Mon Sep 17 00:00:00 2001 From: Gijs Date: Thu, 2 May 2019 13:34:38 +0200 Subject: [PATCH 18/22] Add `--digestfile` flag to output built digest to file. This flag, when set, takes a file in the container and writes the image digest to it. 
This can be used to extract the exact digest of the built image by surrounding tooling without having to parse the logs from Kaniko, for example by pointing the file to a mounted volume or to a file used durint exit status, such as with Kubernetes' [Termination message policy](https://kubernetes.io/docs/tasks/debug-application-cluster/determine-reason-pod-failure/)] When the flag is not set, the digest is not written to file and the executor behaves as before. The digest is also written to file in case of a tarball or a `--no-push`. Closes #654 --- README.md | 13 +++++++++++++ cmd/executor/cmd/root.go | 1 + pkg/config/options.go | 1 + pkg/executor/push.go | 14 ++++++++++++++ 4 files changed, 29 insertions(+) diff --git a/README.md b/README.md index 823b0712b..8f16488d4 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME - [--cache-dir](#--cache-dir) - [--cache-repo](#--cache-repo) - [--cleanup](#--cleanup) + - [--digestfile](#--digestfile) - [--insecure](#--insecure) - [--insecure-pull](#--insecure-pull) - [--no-push](#--no-push) @@ -356,6 +357,18 @@ If `--destination=gcr.io/kaniko-project/test`, then cached layers will be stored _This flag must be used in conjunction with the `--cache=true` flag._ + +#### --digestfile + +Set this flag to specify a file in the container. This file will +receive the digest of a built image. This can be used to +automatically track the exact image built by Kaniko. + +For example, setting the flag to `--digestfile=/dev/termination-log` +will write the digest to that file, which is picked up by +Kubernetes automatically as the `{{.state.terminated.message}}` +of the container. + #### --insecure-registry Set this flag to use plain HTTP requests when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production! 
diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index e51a03ae3..a3a59511b 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -128,6 +128,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) { RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry") RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided") RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.") + RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digestfile", "", "", "Specify a file to save the digest of the built image to.") RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image") RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end") RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. 
Defaults to two weeks.") diff --git a/pkg/config/options.go b/pkg/config/options.go index 9a912f919..ff4d60a13 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -30,6 +30,7 @@ type KanikoOptions struct { Target string CacheRepo string CacheDir string + DigestFile string Destinations multiArg BuildArgs multiArg Insecure bool diff --git a/pkg/executor/push.go b/pkg/executor/push.go index 579aabc61..628982c95 100644 --- a/pkg/executor/push.go +++ b/pkg/executor/push.go @@ -19,6 +19,7 @@ package executor import ( "crypto/tls" "fmt" + "io/ioutil" "net/http" "time" @@ -74,6 +75,19 @@ func CheckPushPermissions(opts *config.KanikoOptions) error { // DoPush is responsible for pushing image to the destinations specified in opts func DoPush(image v1.Image, opts *config.KanikoOptions) error { t := timing.Start("Total Push Time") + + if image != nil && opts.DigestFile != "" { + digest, err := image.Digest() + if err != nil { + return errors.Wrap(err, "error fetching digest") + } + digestByteArray := []byte(digest.String()) + err = ioutil.WriteFile(opts.DigestFile, digestByteArray, 0644) + if err != nil { + return errors.Wrap(err, "writing digest to file failed") + } + } + destRefs := []name.Tag{} for _, destination := range opts.Destinations { destRef, err := name.NewTag(destination, name.WeakValidation) From abd21669f8a89549843860cf4206332bdb717e5f Mon Sep 17 00:00:00 2001 From: Jake Shadle Date: Fri, 10 May 2019 16:55:12 +0200 Subject: [PATCH 19/22] Improve changelog dates (#657) Use ISO-8601 date format for all the dates in the changelog. --- CHANGELOG.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 465cfb0e7..94dc5302e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,11 @@ -# v0.9.0 Release - 2/8/2019 +# v0.9.0 Release - 2019-02-08 ## Bug Fixes * Bug fix with volumes declared in base images during multi-stage builds * Bug fix during snapshotting multi-stage builds. 
* Bug fix for caching with tar output. -# v0.8.0 Release - 1/29/2019 +# v0.8.0 Release - 2019-01-29 ## New Features * Even faster snapshotting with godirwalk @@ -20,7 +20,7 @@ * Fix bug with USER command and unpacking base images. * Added COPY --from=previous stage name/number validation -# v0.7.0 Release - 12/10/2018 +# v0.7.0 Release - 2018-12-10 ## New Features * Add support for COPY --from an unrelated image @@ -34,7 +34,7 @@ * Fix bug with call loop * Fix caching for multi-step builds -# v0.6.0 Release - 11/06/2018 +# v0.6.0 Release - 2018-11-06 ## New Features * parse arg commands at the top of dockerfiles [#404](https://github.com/GoogleContainerTools/kaniko/pull/404) @@ -59,7 +59,7 @@ * fix releasing the cache warmer [#418](https://github.com/GoogleContainerTools/kaniko/pull/418) -# v0.5.0 Release - 10/16/2018 +# v0.5.0 Release - 2018-10-16 ## New Features * Persistent volume caching for base images [#383](https://github.com/GoogleContainerTools/kaniko/pull/383) @@ -78,7 +78,7 @@ * Don't cut everything after an equals sign [#381](https://github.com/GoogleContainerTools/kaniko/pull/381) -# v0.4.0 Release - 10/01/2018 +# v0.4.0 Release - 2018-10-01 ## New Features * Add a benchmark package to store and monitor timings. 
[#367](https://github.com/GoogleContainerTools/kaniko/pull/367) @@ -137,7 +137,7 @@ * Fix handling of the volume directive [#334](https://github.com/GoogleContainerTools/kaniko/pull/334) -# v0.3.0 Release - 7/31/2018 +# v0.3.0 Release - 2018-07-31 New Features * Local integration testing [#256](https://github.com/GoogleContainerTools/kaniko/pull/256) * Add --target flag for multistage builds [#255](https://github.com/GoogleContainerTools/kaniko/pull/255) @@ -149,7 +149,7 @@ Bug Fixes * Multi-stage errors when referencing earlier stages [#233](https://github.com/GoogleContainerTools/kaniko/issues/233) -# v0.2.0 Release - 7/09/2018 +# v0.2.0 Release - 2018-07-09 New Features * Support for adding different source contexts, including Amazon S3 [#195](https://github.com/GoogleContainerTools/kaniko/issues/195) @@ -158,7 +158,7 @@ New Features * Update go-containerregistry so kaniko works better with Harbor and Gitlab[#227](https://github.com/GoogleContainerTools/kaniko/pull/227) * Push image to multiple destinations [#184](https://github.com/GoogleContainerTools/kaniko/pull/184) -# v0.1.0 Release - 5/17/2018 +# v0.1.0 Release - 2018-05-17 New Features * The majority of Dockerfile commands are feature complete [#1](https://github.com/GoogleContainerTools/kaniko/issues/1) From 8c732f6f52bb334727194d090826e8dbd1df3793 Mon Sep 17 00:00:00 2001 From: Johannes 'fish' Ziemke Date: Fri, 10 May 2019 15:57:04 +0100 Subject: [PATCH 20/22] Fix kaniko caching (#639) * Revert "Change cache key calculation to be more reproducible. (#525)" This reverts commit 1ffae47fdd91d38006c1d28e14e66869742cbdba. * Add logging of composition key back * Do not include build args in cache key This should be save, given that the commands will have the args included when the cache key gets built. 
--- integration/dockerfiles/Dockerfile_test_cache | 14 ++------- pkg/executor/build.go | 7 +---- pkg/util/fs_util.go | 20 +++---------- pkg/util/tar_util.go | 1 - pkg/util/util.go | 29 ------------------- 5 files changed, 8 insertions(+), 63 deletions(-) diff --git a/integration/dockerfiles/Dockerfile_test_cache b/integration/dockerfiles/Dockerfile_test_cache index 71644d560..e3ebe304d 100644 --- a/integration/dockerfiles/Dockerfile_test_cache +++ b/integration/dockerfiles/Dockerfile_test_cache @@ -16,15 +16,7 @@ # If the image is built twice, /date should be the same in both images # if the cache is implemented correctly -FROM alpine as base_stage - -RUN mkdir foo && echo base_stage > foo/base - -FROM base_stage as cached_stage - -RUN echo cached_stage > foo/cache - -FROM cached_stage as bug_stage - -RUN echo bug_stage > foo/bug +FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0 RUN date > /date +COPY context/foo /foo +RUN echo hey diff --git a/pkg/executor/build.go b/pkg/executor/build.go index 2fbf592ce..c29de8ca1 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -190,12 +190,7 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro func (s *stageBuilder) build() error { // Set the initial cache key to be the base image digest, the build args and the SrcContext. - dgst, err := util.ReproducibleDigest(s.image) - if err != nil { - return err - } - compositeKey := NewCompositeCache(dgst) - compositeKey.AddKey(s.opts.BuildArgs...) + compositeKey := NewCompositeCache(s.baseImageDigest) // Apply optimizations to the instructions. 
if err := s.optimize(*compositeKey, s.cf.Config); err != nil { diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 5ef48845f..5f72c7784 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -79,10 +79,7 @@ func GetFSFromImage(root string, img v1.Image) ([]string, error) { if err != nil { return nil, err } - - // Store a map of files to their mtime. We need to set mtimes in a second pass because creating files - // can change the mtime of a directory. - extractedFiles := map[string]time.Time{} + extractedFiles := []string{} for i, l := range layers { logrus.Debugf("Extracting layer %d", i) @@ -113,17 +110,10 @@ func GetFSFromImage(root string, img v1.Image) ([]string, error) { if err := extractFile(root, hdr, tr); err != nil { return nil, err } - extractedFiles[filepath.Join(root, filepath.Clean(hdr.Name))] = hdr.ModTime + extractedFiles = append(extractedFiles, filepath.Join(root, filepath.Clean(hdr.Name))) } } - - fileNames := []string{} - for f, t := range extractedFiles { - fileNames = append(fileNames, f) - os.Chtimes(f, time.Time{}, t) - } - - return fileNames, nil + return extractedFiles, nil } // DeleteFilesystem deletes the extracted image file system @@ -272,7 +262,6 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error { return err } } - return nil } @@ -377,8 +366,7 @@ func RelativeFiles(fp string, root string) ([]string, error) { } // ParentDirectories returns a list of paths to all parent directories -// Ex. /some/temp/dir -> [/some, /some/temp, /some/temp/dir] -// This purposefully excludes the /. +// Ex. 
/some/temp/dir -> [/, /some, /some/temp, /some/temp/dir] func ParentDirectories(path string) []string { path = filepath.Clean(path) dirs := strings.Split(path, "/") diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go index b72785707..bc1cc67a0 100644 --- a/pkg/util/tar_util.go +++ b/pkg/util/tar_util.go @@ -54,7 +54,6 @@ func (t *Tar) Close() { // AddFileToTar adds the file at path p to the tar func (t *Tar) AddFileToTar(p string) error { - logrus.Debugf("Adding file %s to tar", p) i, err := os.Lstat(p) if err != nil { return fmt.Errorf("Failed to get file info for %s: %s", p, err) diff --git a/pkg/util/util.go b/pkg/util/util.go index e7344c88f..873cbae20 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -20,14 +20,11 @@ import ( "crypto/md5" "crypto/sha256" "encoding/hex" - "encoding/json" "io" "os" "strconv" "syscall" - "github.com/google/go-containerregistry/pkg/v1" - "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -130,29 +127,3 @@ func SHA256(r io.Reader) (string, error) { } return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil } - -type ReproducibleManifest struct { - Layers []v1.Descriptor - Config v1.Config -} - -func ReproducibleDigest(img partial.WithManifestAndConfigFile) (string, error) { - mfst, err := img.Manifest() - if err != nil { - return "", err - } - cfg, err := img.ConfigFile() - if err != nil { - return "", err - } - rm := ReproducibleManifest{ - Layers: mfst.Layers, - Config: cfg.Config, - } - - b, err := json.Marshal(rm) - if err != nil { - return "", err - } - return string(b), nil -} From 3686b654260618d19b621453471cdfce5b9e8f66 Mon Sep 17 00:00:00 2001 From: Gijs Date: Sat, 11 May 2019 15:14:25 +0200 Subject: [PATCH 21/22] Process feedback of priyawadhwa regarding naming/nilcheck. Changes the argument flag from `--digestfile` to `--digest-file`. Skips an unneeded nil check. 
--- cmd/executor/cmd/root.go | 2 +- pkg/executor/push.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index a3a59511b..1fc2672bc 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -128,7 +128,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) { RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry") RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided") RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.") - RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digestfile", "", "", "Specify a file to save the digest of the built image to.") + RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digest-file", "", "", "Specify a file to save the digest of the built image to.") RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image") RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end") RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. 
Defaults to two weeks.") diff --git a/pkg/executor/push.go b/pkg/executor/push.go index 628982c95..a02d5b9b9 100644 --- a/pkg/executor/push.go +++ b/pkg/executor/push.go @@ -76,7 +76,7 @@ func CheckPushPermissions(opts *config.KanikoOptions) error { func DoPush(image v1.Image, opts *config.KanikoOptions) error { t := timing.Start("Total Push Time") - if image != nil && opts.DigestFile != "" { + if opts.DigestFile != "" { digest, err := image.Digest() if err != nil { return errors.Wrap(err, "error fetching digest") From 1c13829cdaa24cddd59dde775a279576cc8212fd Mon Sep 17 00:00:00 2001 From: Gijs Date: Tue, 14 May 2019 10:23:50 +0200 Subject: [PATCH 22/22] Update README to reflect flag name change --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 8f16488d4..ed7d23255 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME - [--cache-dir](#--cache-dir) - [--cache-repo](#--cache-repo) - [--cleanup](#--cleanup) - - [--digestfile](#--digestfile) + - [--digest-file](#--digest-file) - [--insecure](#--insecure) - [--insecure-pull](#--insecure-pull) - [--no-push](#--no-push) @@ -358,13 +358,13 @@ If `--destination=gcr.io/kaniko-project/test`, then cached layers will be stored _This flag must be used in conjunction with the `--cache=true` flag._ -#### --digestfile +#### --digest-file Set this flag to specify a file in the container. This file will receive the digest of a built image. This can be used to automatically track the exact image built by Kaniko. -For example, setting the flag to `--digestfile=/dev/termination-log` +For example, setting the flag to `--digest-file=/dev/termination-log` will write the digest to that file, which is picked up by Kubernetes automatically as the `{{.state.terminated.message}}` of the container.