From 13accbaf3243a5cf477c9e3d5116dc09dd80ae37 Mon Sep 17 00:00:00 2001
From: Priya Wadhwa
Date: Tue, 4 Sep 2018 13:37:15 -0700
Subject: [PATCH 1/7] Add Key() to LayeredMap and Snapshotter

This will return a string representation of the current filesystem to be
used with caching.

Whenever a file is explicitly added (via ADD or COPY), it will be stored
in "added" in the LayeredMap. The file will map to a hash created by
CacheHasher (which doesn't take mtime into account, since mtime will be
different with every build, making the cache useless).

Key() returns a sha of the added files, which will be used in
determining the overall cache key for a command.
---
 pkg/executor/build.go | 2 +-
 pkg/snapshot/layered_map.go | 36 ++++++++++++---
 pkg/snapshot/layered_map_test.go | 78 ++++++++++++++++++++++++++++++++
 pkg/snapshot/snapshot.go | 8 +++-
 pkg/snapshot/snapshot_test.go | 2 +-
 pkg/util/util.go | 38 ++++++++++++++++
 6 files changed, 155 insertions(+), 9 deletions(-)
 create mode 100644 pkg/snapshot/layered_map_test.go

diff --git a/pkg/executor/build.go b/pkg/executor/build.go
index 22807d6a7..f6ac89f46 100644
--- a/pkg/executor/build.go
+++ b/pkg/executor/build.go
@@ -62,7 +62,7 @@ func DoBuild(opts *options.KanikoOptions) (v1.Image, error) {
 	if err := util.GetFSFromImage(constants.RootDir, sourceImage); err != nil {
 		return nil, err
 	}
-	l := snapshot.NewLayeredMap(hasher)
+	l := snapshot.NewLayeredMap(hasher, util.CacheHasher())
 	snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
 	// Take initial snapshot
 	if err := snapshotter.Init(); err != nil {
diff --git a/pkg/snapshot/layered_map.go b/pkg/snapshot/layered_map.go
index 0d382d766..c922bbafc 100644
--- a/pkg/snapshot/layered_map.go
+++ b/pkg/snapshot/layered_map.go
@@ -17,20 +17,27 @@ limitations under the License.
 package snapshot
 
 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
 	"path/filepath"
 	"strings"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
 )
 
 type LayeredMap struct {
-	layers []map[string]string
-	whiteouts []map[string]string
-	hasher func(string) (string, error)
+	layers []map[string]string
+	whiteouts []map[string]string
+	added []map[string]string
+	hasher func(string) (string, error)
+	cacheHasher func(string) (string, error)
 }
 
-func NewLayeredMap(h func(string) (string, error)) *LayeredMap {
+func NewLayeredMap(h func(string) (string, error), c func(string) (string, error)) *LayeredMap {
 	l := LayeredMap{
-		hasher: h,
+		hasher: h,
+		cacheHasher: c,
 	}
 	l.layers = []map[string]string{}
 	return &l
@@ -39,8 +46,18 @@ func NewLayeredMap(h func(string) (string, error)) *LayeredMap {
 func (l *LayeredMap) Snapshot() {
 	l.whiteouts = append(l.whiteouts, map[string]string{})
 	l.layers = append(l.layers, map[string]string{})
+	l.added = append(l.added, map[string]string{})
 }
 
+// Key returns a hash for added files
+func (l *LayeredMap) Key() (string, error) {
+	c := bytes.NewBuffer([]byte{})
+	enc := json.NewEncoder(c)
+	enc.Encode(l.added)
+	return util.SHA256(c)
+}
+
+// GetFlattenedPathsForWhiteOut returns all paths in the current FS
 func (l *LayeredMap) GetFlattenedPathsForWhiteOut() map[string]struct{} {
 	paths := map[string]struct{}{}
 	for _, l := range l.layers {
@@ -85,11 +102,18 @@ func (l *LayeredMap) MaybeAddWhiteout(s string) (bool, error) {
 // Add will add the specified file s to the layered map.
func (l *LayeredMap) Add(s string) error { + // Use hash function and add to layers newV, err := l.hasher(s) if err != nil { - return fmt.Errorf("Error creating hash for %s: %s", s, err) + return fmt.Errorf("Error creating hash for %s: %v", s, err) } l.layers[len(l.layers)-1][s] = newV + // Use cache hash function and add to added + cacheV, err := l.cacheHasher(s) + if err != nil { + return fmt.Errorf("Error creating cache hash for %s: %v", s, err) + } + l.added[len(l.added)-1][s] = cacheV return nil } diff --git a/pkg/snapshot/layered_map_test.go b/pkg/snapshot/layered_map_test.go new file mode 100644 index 000000000..e5ea64f02 --- /dev/null +++ b/pkg/snapshot/layered_map_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package snapshot + +import ( + "testing" +) + +func Test_CacheKey(t *testing.T) { + tests := []struct { + name string + map1 map[string]string + map2 map[string]string + equal bool + }{ + { + name: "maps are the same", + map1: map[string]string{ + "a": "apple", + "b": "bat", + "c": "cat", + }, + map2: map[string]string{ + "c": "cat", + "b": "bat", + "a": "apple", + }, + equal: true, + }, + { + name: "maps are different", + map1: map[string]string{ + "a": "apple", + "b": "bat", + "c": "cat", + }, + map2: map[string]string{ + "c": "", + "b": "bat", + "a": "apple", + }, + equal: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + lm1 := LayeredMap{added: []map[string]string{test.map1}} + lm2 := LayeredMap{added: []map[string]string{test.map2}} + k1, err := lm1.Key() + if err != nil { + t.Fatalf("error getting key for map 1: %v", err) + } + k2, err := lm2.Key() + if err != nil { + t.Fatalf("error getting key for map 2: %v", err) + } + if test.equal && k1 != k2 { + t.Fatalf("keys differ.\nExpected\n%+v\nActual\n%+v", k1, k2) + } + if !test.equal && k1 == k2 { + t.Fatal("keys are the same, expected different keys") + } + }) + } +} diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 4f01441dc..55da45d65 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -49,6 +49,11 @@ func (s *Snapshotter) Init() error { return nil } +// Key returns a string based on the current state of the file system +func (s *Snapshotter) Key() (string, error) { + return s.l.Key() +} + // TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates // a tarball of the changed files. 
Return contents of the tarball, and whether or not any files were changed func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) { @@ -102,7 +107,8 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) { logrus.Info("No files changed in this command, skipping snapshotting.") return false, nil } - logrus.Infof("Taking snapshot of files %v...", files) + logrus.Info("Taking snapshot of files...") + logrus.Debugf("Taking snapshot of files %v", files) snapshottedFiles := make(map[string]bool) filesAdded := false diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 72b6a750a..95daa88f6 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -198,7 +198,7 @@ func setUpTestDir() (string, *Snapshotter, error) { } // Take the initial snapshot - l := NewLayeredMap(util.Hasher()) + l := NewLayeredMap(util.Hasher(), util.CacheHasher()) snapshotter := NewSnapshotter(l, testDir) if err := snapshotter.Init(); err != nil { return testDir, nil, errors.Wrap(err, "initializing snapshotter") diff --git a/pkg/util/util.go b/pkg/util/util.go index 617298a7b..bc09a7c27 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -18,6 +18,7 @@ package util import ( "crypto/md5" + "crypto/sha256" "encoding/hex" "io" "os" @@ -72,6 +73,36 @@ func Hasher() func(string) (string, error) { return hasher } +// CacheHasher takes into account everything the regular hasher does except for mtime +func CacheHasher() func(string) (string, error) { + hasher := func(p string) (string, error) { + h := md5.New() + fi, err := os.Lstat(p) + if err != nil { + return "", err + } + h.Write([]byte(fi.Mode().String())) + + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36))) + h.Write([]byte(",")) + h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36))) + + if fi.Mode().IsRegular() { + f, err := os.Open(p) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(h, f); err != nil { + return "", err + } + } + + return hex.EncodeToString(h.Sum(nil)), nil + } + return hasher +} + // MtimeHasher returns a hash function, which only looks at mtime to determine if a file has changed. // Note that the mtime can lag, so it's possible that a file will have changed but the mtime may look the same. 
func MtimeHasher() func(string) (string, error) { @@ -86,3 +117,10 @@ func MtimeHasher() func(string) (string, error) { } return hasher } + +// SHA256 returns the shasum of the contents of r +func SHA256(r io.Reader) (string, error) { + hasher := sha256.New() + _, err := io.Copy(hasher, r) + return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), err +} From 80a449f5419cf1d83e2d1d5cc1a17f78374af53d Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Fri, 7 Sep 2018 16:03:56 -0700 Subject: [PATCH 2/7] code review comments --- pkg/snapshot/layered_map.go | 9 +++++---- pkg/snapshot/layered_map_test.go | 11 ++++++----- pkg/util/util.go | 5 ++++- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pkg/snapshot/layered_map.go b/pkg/snapshot/layered_map.go index c922bbafc..9a356b0e2 100644 --- a/pkg/snapshot/layered_map.go +++ b/pkg/snapshot/layered_map.go @@ -27,10 +27,11 @@ import ( ) type LayeredMap struct { - layers []map[string]string - whiteouts []map[string]string - added []map[string]string - hasher func(string) (string, error) + layers []map[string]string + whiteouts []map[string]string + added []map[string]string + hasher func(string) (string, error) + // cacheHasher doesn't include mtime in it's hash so that filesystem cache keys are stable cacheHasher func(string) (string, error) } diff --git a/pkg/snapshot/layered_map_test.go b/pkg/snapshot/layered_map_test.go index e5ea64f02..6bfff81b3 100644 --- a/pkg/snapshot/layered_map_test.go +++ b/pkg/snapshot/layered_map_test.go @@ -32,11 +32,15 @@ func Test_CacheKey(t *testing.T) { "a": "apple", "b": "bat", "c": "cat", + "d": "dog", + "e": "egg", }, map2: map[string]string{ "c": "cat", + "d": "dog", "b": "bat", "a": "apple", + "e": "egg", }, equal: true, }, @@ -67,11 +71,8 @@ func Test_CacheKey(t *testing.T) { if err != nil { t.Fatalf("error getting key for map 2: %v", err) } - if test.equal && k1 != k2 { - t.Fatalf("keys differ.\nExpected\n%+v\nActual\n%+v", k1, k2) - } - if !test.equal && k1 == k2 { - t.Fatal("keys are the same, expected different keys") + if test.equal != (k1 == k2) { + t.Fatalf("unexpected result: \nExpected\n%s\nActual\n%s\n", k1, k2) } }) } diff --git a/pkg/util/util.go b/pkg/util/util.go index bc09a7c27..873cbae20 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -122,5 +122,8 @@ func MtimeHasher() func(string) (string, error) { func SHA256(r io.Reader) (string, error) { hasher := sha256.New() _, err := io.Copy(hasher, r) - return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), err + if err != nil { + return "", err + } + return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil } From 63cecbff74d2446d8d76e1e798059362d5169cb2 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 10 Sep 2018 17:06:09 -0700 Subject: [PATCH 3/7] Whitelist /etc/mtab While looking into #345, we were seeing the error: Error: error building image: chmod /etc/mtab: operation not permitted during extraction of `amazonlinux:1`. I looked into why kaniko couldn't extract this file properly, and found that it already existed as a symlink pointing to /proc/mounts, which returned an error when we tried to run chmod on it. Confusingly, in the image the /etc/mtab is a regular file, not a symlink. I can think of two ways to solve this problem: 1. Whitelist /etc/mtab so that whatever already exists in the system is used 2. 
Check if a regular file already exists, and hasn't been extracted yet, before extracting I went with option 1 because for option 2 we'd have to keep a list of all files that had been extracted in memory. --- pkg/util/fs_util.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 4c6dd9fff..af5b6df42 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -40,6 +40,9 @@ var whitelist = []string{ // which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist // in the image with no way to tell if it came from the base image or not. "/var/run", + // similarly, we whitelist /etc/mtab, since there is no way to know if the file was mounted or came + // from the base image + "/etc/mtab", } var volumeWhitelist = []string{} @@ -195,7 +198,6 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error { return err } currFile.Close() - case tar.TypeDir: logrus.Debugf("creating dir %s", path) if err := os.MkdirAll(path, mode); err != nil { From c13f6e84eda3da92d5ffca67f44412d68614ed10 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Mon, 10 Sep 2018 18:20:00 -0700 Subject: [PATCH 4/7] Fixed unit test --- pkg/util/fs_util_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 5bfb1a403..32df3c406 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -50,7 +50,7 @@ func Test_fileSystemWhitelist(t *testing.T) { } actualWhitelist, err := fileSystemWhitelist(path) - expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run"} + expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run", "/etc/mtab"} sort.Strings(actualWhitelist) sort.Strings(expectedWhitelist) testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist) From 99ab68e7f4dd49b0ea5d89ea09c93f2ade370ef7 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 11 Sep 2018 10:31:20 -0700 Subject: [PATCH 5/7] Replace gometalinter with GolangCI-Lint gometalinter is broken @ HEAD, and I looked into why that was. During that process, I remembered that we took the linting scripts from skaffold, and found that in skaffold gometalinter was replaced with GolangCI-Lint: https://github.com/GoogleContainerTools/skaffold/pull/619 The change made linting in skaffold faster, so I figured instead of fixing gometalinter it made more sense to remove it and replace it with GolangCI-Lint for kaniko as well. 
--- cmd/executor/cmd/root.go | 4 +- hack/boilerplate/boilerplate.py | 29 +-- hack/gometalinter.json | 17 -- hack/install_golint.sh | 388 ++++++++++++++++++++++++++++ hack/{gometalinter.sh => linter.sh} | 30 ++- integration/integration_test.go | 15 +- pkg/buildcontext/buildcontext.go | 3 +- pkg/buildcontext/s3.go | 5 +- pkg/commands/user_test.go | 24 +- pkg/config/options.go | 10 +- pkg/config/stage.go | 2 +- pkg/dockerfile/buildargs.go | 7 +- pkg/executor/push.go | 2 +- pkg/util/bucket_util.go | 3 +- pkg/util/command_util_test.go | 14 +- pkg/util/fs_util_test.go | 4 +- pkg/util/image_util.go | 3 +- pkg/util/tar_util.go | 4 +- test.sh | 2 +- 19 files changed, 466 insertions(+), 100 deletions(-) delete mode 100644 hack/gometalinter.json create mode 100755 hack/install_golint.sh rename hack/{gometalinter.sh => linter.sh} (61%) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 841fce661..97511e092 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -86,7 +86,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) { RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting") RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.") RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePush, "insecure", "", false, "Push to insecure registry using plain HTTP") - RootCmd.PersistentFlags().BoolVarP(&opts.SkipTlsVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify") + RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify") RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing") RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.") RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible") @@ -145,7 +145,7 @@ func resolveSourceContext() error { opts.SrcContext = opts.Bucket } } - // if no prefix use Google Cloud Storage as default for backwards compability + // if no prefix use Google Cloud Storage as default for backwards compatibility contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext) if err != nil { return err diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index bcc4b1c8f..83e6b1b3e 100644 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -18,12 +18,14 @@ from __future__ import print_function import argparse import glob -import json -import mmap import os import re import sys + +SKIPPED_DIRS = ["Godeps", "third_party", ".git", "vendor", "examples", "testdata"] +SKIPPED_FILES = ["install_golint.sh"] + parser = argparse.ArgumentParser() parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*') @@ -71,7 +73,7 @@ def file_passes(filename, refs, regexs): (data, found) = p.subn("", data, 1) # remove shebang from the top of shell files - if extension == "sh": + elif extension == "sh": p = regexs["shebang"] (data, found) = p.subn("", data, 1) @@ -105,17 +107,11 @@ def file_passes(filename, refs, regexs): def file_extension(filename): return os.path.splitext(filename)[1].split(".")[-1].lower() -skipped_dirs = 
['Godeps', 'third_party', '.git', "vendor", "differs/testDirs/pipTests"] - def normalize_files(files): newfiles = [] - for pathname in files: - if any(x in pathname for x in skipped_dirs): - continue - newfiles.append(pathname) - for i, pathname in enumerate(newfiles): + for i, pathname in enumerate(files): if not os.path.isabs(pathname): - newfiles[i] = os.path.join(args.rootdir, pathname) + newfiles.append(os.path.join(args.rootdir, pathname)) return newfiles def get_files(extensions): @@ -124,17 +120,14 @@ def get_files(extensions): files = args.filenames else: for root, dirs, walkfiles in os.walk(args.rootdir): - # don't visit certain dirs. This is just a performance improvement - # as we would prune these later in normalize_files(). But doing it - # cuts down the amount of filesystem walking we do and cuts down - # the size of the file list - for d in skipped_dirs: + for d in SKIPPED_DIRS: if d in dirs: dirs.remove(d) for name in walkfiles: - pathname = os.path.join(root, name) - files.append(pathname) + if name not in SKIPPED_FILES: + pathname = os.path.join(root, name) + files.append(pathname) files = normalize_files(files) outfiles = [] diff --git a/hack/gometalinter.json b/hack/gometalinter.json deleted file mode 100644 index 857b558e1..000000000 --- a/hack/gometalinter.json +++ /dev/null @@ -1,17 +0,0 @@ - -{ - "Vendor": true, - "EnableGC": true, - "Debug": false, - "Sort": ["linter", "severity", "path"], - "Enable": [ - "deadcode", - "gofmt", - "golint", - "gosimple", - "ineffassign", - "vet" - ], - - "LineLength": 200 -} diff --git a/hack/install_golint.sh b/hack/install_golint.sh new file mode 100755 index 000000000..6010f3b27 --- /dev/null +++ b/hack/install_golint.sh @@ -0,0 +1,388 @@ +#!/bin/sh +set -e +# Code generated by godownloader on 2018-06-05T12:04:55Z. DO NOT EDIT. +# + +usage() { + this=$1 + cat </dev/null +} +echoerr() { + echo "$@" 1>&2 +} +log_prefix() { + echo "$0" +} +_logp=6 +log_set_priority() { + _logp="$1" +} +log_priority() { + if test -z "$1"; then + echo "$_logp" + return + fi + [ "$1" -le "$_logp" ] +} +log_tag() { + case $1 in + 0) echo "emerg" ;; + 1) echo "alert" ;; + 2) echo "crit" ;; + 3) echo "err" ;; + 4) echo "warning" ;; + 5) echo "notice" ;; + 6) echo "info" ;; + 7) echo "debug" ;; + *) echo "$1" ;; + esac +} +log_debug() { + log_priority 7 || return 0 + echoerr "$(log_prefix)" "$(log_tag 7)" "$@" +} +log_info() { + log_priority 6 || return 0 + echoerr "$(log_prefix)" "$(log_tag 6)" "$@" +} +log_err() { + log_priority 3 || return 0 + echoerr "$(log_prefix)" "$(log_tag 3)" "$@" +} +log_crit() { + log_priority 2 || return 0 + echoerr "$(log_prefix)" "$(log_tag 2)" "$@" +} +uname_os() { + os=$(uname -s | tr '[:upper:]' '[:lower:]') + case "$os" in + msys_nt) os="windows" ;; + esac + echo "$os" +} +uname_arch() { + arch=$(uname -m) + case $arch in + x86_64) arch="amd64" ;; + x86) arch="386" ;; + i686) arch="386" ;; + i386) arch="386" ;; + aarch64) arch="arm64" ;; + armv5*) arch="armv5" ;; + armv6*) arch="armv6" ;; + armv7*) arch="armv7" ;; + esac + echo ${arch} +} +uname_os_check() { + os=$(uname_os) + case "$os" in + darwin) return 0 ;; + dragonfly) return 0 ;; + freebsd) return 0 ;; + linux) return 0 ;; + android) return 0 ;; + nacl) return 0 ;; + netbsd) return 0 ;; + openbsd) return 0 ;; + plan9) return 0 ;; + solaris) return 0 ;; + windows) return 0 ;; + esac + log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. 
Please file bug at https://github.com/client9/shlib" + return 1 +} +uname_arch_check() { + arch=$(uname_arch) + case "$arch" in + 386) return 0 ;; + amd64) return 0 ;; + arm64) return 0 ;; + armv5) return 0 ;; + armv6) return 0 ;; + armv7) return 0 ;; + ppc64) return 0 ;; + ppc64le) return 0 ;; + mips) return 0 ;; + mipsle) return 0 ;; + mips64) return 0 ;; + mips64le) return 0 ;; + s390x) return 0 ;; + amd64p32) return 0 ;; + esac + log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib" + return 1 +} +untar() { + tarball=$1 + case "${tarball}" in + *.tar.gz | *.tgz) tar -xzf "${tarball}" ;; + *.tar) tar -xf "${tarball}" ;; + *.zip) unzip "${tarball}" ;; + *) + log_err "untar unknown archive format for ${tarball}" + return 1 + ;; + esac +} +mktmpdir() { + test -z "$TMPDIR" && TMPDIR="$(mktemp -d)" + mkdir -p "${TMPDIR}" + echo "${TMPDIR}" +} +http_download_curl() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url") + else + code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url") + fi + if [ "$code" != "200" ]; then + log_debug "http_download_curl received HTTP status $code" + return 1 + fi + return 0 +} +http_download_wget() { + local_file=$1 + source_url=$2 + header=$3 + if [ -z "$header" ]; then + wget -q -O "$local_file" "$source_url" + else + wget -q --header "$header" -O "$local_file" "$source_url" + fi +} +http_download() { + log_debug "http_download $2" + if is_command curl; then + http_download_curl "$@" + return + elif is_command wget; then + http_download_wget "$@" + return + fi + log_crit "http_download unable to find wget or curl" + return 1 +} +http_copy() { + tmp=$(mktemp) + http_download "${tmp}" "$1" "$2" || return 1 + body=$(cat "$tmp") + rm -f "${tmp}" + echo "$body" +} +github_release() { + owner_repo=$1 + version=$2 + test -z "$version" && version="latest" + giturl="https://github.com/${owner_repo}/releases/${version}" + json=$(http_copy "$giturl" "Accept:application/json") + test -z "$json" && return 1 + version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//') + test -z "$version" && return 1 + echo "$version" +} +hash_sha256() { + TARGET=${1:-/dev/stdin} + if is_command gsha256sum; then + hash=$(gsha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command sha256sum; then + hash=$(sha256sum "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command shasum; then + hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1 + echo "$hash" | cut -d ' ' -f 1 + elif is_command openssl; then + hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1 + echo "$hash" | cut -d ' ' -f a + else + log_crit "hash_sha256 unable to find command to compute sha-256 hash" + return 1 + fi +} +hash_sha256_verify() { + TARGET=$1 + checksums=$2 + if [ -z "$checksums" ]; then + log_err "hash_sha256_verify checksum file not specified in arg2" + return 1 + fi + BASENAME=${TARGET##*/} + want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1) + if [ -z "$want" ]; then + log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'" + return 1 + fi + got=$(hash_sha256 "$TARGET") + if [ "$want" != "$got" ]; then + log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got" + return 1 + fi +} +cat /dev/null < 1 { @@ -135,7 +135,7 @@ func (t *Tar) 
checkHardlink(p string, i os.FileInfo) (bool, string) { return hardlink, linkDst } -func getSyscallStat_t(i os.FileInfo) *syscall.Stat_t { +func getSyscallStatT(i os.FileInfo) *syscall.Stat_t { if sys := i.Sys(); sys != nil { if stat, ok := sys.(*syscall.Stat_t); ok { return stat diff --git a/test.sh b/test.sh index c332c8f29..c86b974cd 100755 --- a/test.sh +++ b/test.sh @@ -31,7 +31,7 @@ echo "Running validation scripts..." scripts=( "hack/boilerplate.sh" "hack/gofmt.sh" - "hack/gometalinter.sh" + "hack/linter.sh" "hack/dep.sh" ) fail=0 From ccb6259b0687226f8c5c83c091c4769035f9a06c Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 11 Sep 2018 13:56:44 -0700 Subject: [PATCH 6/7] More linting errors --- pkg/commands/shell.go | 9 +-------- pkg/dockerfile/dockerfile.go | 6 +++--- pkg/dockerfile/dockerfile_test.go | 3 +-- pkg/util/command_util.go | 5 +---- pkg/util/fs_util.go | 6 ++++-- pkg/util/tar_util.go | 11 +++-------- 6 files changed, 13 insertions(+), 27 deletions(-) diff --git a/pkg/commands/shell.go b/pkg/commands/shell.go index bb763828e..f1bc1ab0b 100644 --- a/pkg/commands/shell.go +++ b/pkg/commands/shell.go @@ -20,7 +20,6 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/sirupsen/logrus" ) type ShellCommand struct { @@ -29,13 +28,7 @@ type ShellCommand struct { // ExecuteCommand handles command processing similar to CMD and RUN, func (s *ShellCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { - logrus.Info("cmd: SHELL") - var newShell []string - - newShell = s.cmd.Shell - - logrus.Infof("Replacing Shell in config with %v", newShell) - config.Shell = newShell + config.Shell = s.cmd.Shell return nil } diff --git a/pkg/dockerfile/dockerfile.go b/pkg/dockerfile/dockerfile.go index 5f37a16cf..00fc71cb7 100644 --- a/pkg/dockerfile/dockerfile.go +++ b/pkg/dockerfile/dockerfile.go @@ -54,8 +54,8 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { stage.Name = resolvedBaseName kanikoStages = append(kanikoStages, config.KanikoStage{ Stage: stage, - BaseImageIndex: baseImageIndex(opts, index, stages), - BaseImageStoredLocally: (baseImageIndex(opts, index, stages) != -1), + BaseImageIndex: baseImageIndex(index, stages), + BaseImageStoredLocally: (baseImageIndex(index, stages) != -1), SaveStage: saveStage(index, stages), FinalStage: index == targetStage, }) @@ -68,7 +68,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { // baseImageIndex returns the index of the stage the current stage is built off // returns -1 if the current stage isn't built off a previous stage -func baseImageIndex(opts *config.KanikoOptions, currentStage int, stages []instructions.Stage) int { +func baseImageIndex(currentStage int, stages []instructions.Stage) int { for i, stage := range stages { if i > currentStage { break diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go index bd09c26ba..cd83c79ff 100644 --- a/pkg/dockerfile/dockerfile_test.go +++ b/pkg/dockerfile/dockerfile_test.go @@ -20,7 +20,6 @@ import ( "strconv" "testing" - "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/testutil" "github.com/moby/buildkit/frontend/dockerfile/instructions" ) @@ -184,7 +183,7 @@ func Test_baseImageIndex(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - actual := 
baseImageIndex(&config.KanikoOptions{}, test.currentStage, stages) + actual := baseImageIndex(test.currentStage, stages) if actual != test.expected { t.Fatalf("unexpected result, expected %d got %d", test.expected, actual) } diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index c8bb50709..b6574faa8 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -228,10 +228,7 @@ func IsSrcRemoteFileURL(rawurl string) bool { return false } _, err = http.Get(rawurl) - if err != nil { - return false - } - return true + return err == nil } func UpdateConfigEnv(newEnvs []instructions.KeyValuePair, config *v1.Config, replacementEnvs []string) error { diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 4c6dd9fff..fa84107be 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -109,7 +109,7 @@ func GetFSFromImage(root string, img v1.Image) error { // DeleteFilesystem deletes the extracted image file system func DeleteFilesystem() error { logrus.Info("Deleting filesystem...") - err := filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, err error) error { + return filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, _ error) error { whitelisted, err := CheckWhitelist(path) if err != nil { return err @@ -123,7 +123,6 @@ func DeleteFilesystem() error { } return os.RemoveAll(path) }) - return err } // ChildDirInWhitelist returns true if there is a child file or directory of the path in the whitelist @@ -310,6 +309,9 @@ func RelativeFiles(fp string, root string) ([]string, error) { fullPath := filepath.Join(root, fp) logrus.Debugf("Getting files and contents at root %s", fullPath) err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } whitelisted, err := CheckWhitelist(path) if err != nil { return err diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go index a109d4111..bc1cc67a0 100644 --- a/pkg/util/tar_util.go +++ b/pkg/util/tar_util.go @@ -195,10 +195,10 @@ func fileIsCompressedTar(src string) (bool, archive.Compression) { func fileIsUncompressedTar(src string) bool { r, err := os.Open(src) - defer r.Close() if err != nil { return false } + defer r.Close() fi, err := os.Lstat(src) if err != nil { return false @@ -210,13 +210,8 @@ func fileIsUncompressedTar(src string) bool { if tr == nil { return false } - for { - _, err := tr.Next() - if err != nil { - return false - } - return true - } + _, err = tr.Next() + return err == nil } // UnpackCompressedTar unpacks the compressed tar at path to dir From 7635421ae99971346ac61d3c594973c974fde114 Mon Sep 17 00:00:00 2001 From: Priya Wadhwa Date: Tue, 11 Sep 2018 16:24:21 -0700 Subject: [PATCH 7/7] Add t.Helper() call to checkLayers for better error logging --- integration/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/integration/integration_test.go b/integration/integration_test.go index e9601da96..20fd22480 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -245,6 +245,7 @@ func TestLayers(t *testing.T) { } func checkLayers(t *testing.T, image1, image2 string, offset int) { + t.Helper() img1, err := getImageDetails(image1) if err != nil { t.Fatalf("Couldn't get details from image reference for (%s): %s", image1, err)
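
To illustrate the behaviour the first patch describes — CacheHasher hashes everything the regular hasher does except mtime, so that cache keys stay stable across builds — here is a minimal, hypothetical sketch that is not part of this series. It uses only the two exported helpers shown above, util.Hasher and util.CacheHasher:

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"time"

	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	// Create a throwaway file to hash.
	f, err := ioutil.TempFile("", "kaniko-cache-example")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString("some file contents")
	f.Close()

	hasher := util.Hasher()           // regular hasher, folds mtime into the digest
	cacheHasher := util.CacheHasher() // ignores mtime

	h1, _ := hasher(f.Name())
	c1, _ := cacheHasher(f.Name())

	// Bump only the mtime; contents, mode, uid and gid are unchanged.
	later := time.Now().Add(time.Hour)
	os.Chtimes(f.Name(), later, later)

	h2, _ := hasher(f.Name())
	c2, _ := cacheHasher(f.Name())

	fmt.Println("snapshot hash stable:", h1 == h2) // false: the regular hasher sees the new mtime
	fmt.Println("cache hash stable:", c1 == c2)    // true: the cache hash is unaffected
}

Because mtime differs on every build, only the second property makes the hash usable in a cache key, which is why Key() is computed from the CacheHasher values stored in "added" rather than from the layer hashes.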
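
The first commit message says the value returned by Key() "will be used in determining the overall cache key for a command", but that composition is not implemented in this series. A rough sketch of how a later change might combine Snapshotter.Key() with the command text — the commandCacheKey helper below is purely hypothetical — could look like this:

package main

import (
	"fmt"
	"strings"

	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

// commandCacheKey is a hypothetical helper, not part of these patches: it
// mixes the hash of explicitly added files (Snapshotter.Key) with the raw
// Dockerfile command to produce a per-command cache key.
func commandCacheKey(s *snapshot.Snapshotter, command string) (string, error) {
	fsKey, err := s.Key() // sha256 over the JSON-encoded "added" maps
	if err != nil {
		return "", err
	}
	return util.SHA256(strings.NewReader(fsKey + command))
}

func main() {
	// Wire the snapshotter up the same way DoBuild does in the first patch.
	l := snapshot.NewLayeredMap(util.Hasher(), util.CacheHasher())
	s := snapshot.NewSnapshotter(l, "/")

	key, err := commandCacheKey(s, "COPY foo /app/foo")
	if err != nil {
		panic(err)
	}
	fmt.Println("cache key for this command:", key)
}

In a real build the snapshotter would first be initialized against the extracted base image (snapshotter.Init() in DoBuild); the sketch skips that step to stay self-contained.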