From cd9be5d51370aae72bfc9fad48a0267e425c3a4e Mon Sep 17 00:00:00 2001
From: Cole Wippern
Date: Fri, 17 Jan 2020 18:23:01 -0800
Subject: [PATCH 01/37] reuse cached layer

---
 pkg/commands/cache.go      | 37 ++++++++++++++++++
 pkg/commands/cache_test.go | 36 ++++++++++++++++++
 pkg/commands/copy.go       |  8 ++++
 pkg/commands/copy_test.go  | 19 +++++++---
 pkg/commands/run.go        |  8 ++++
 pkg/commands/run_test.go   | 22 ++++++++---
 pkg/executor/build.go      | 78 ++++++++++++++++++++++++++++----------
 7 files changed, 177 insertions(+), 31 deletions(-)
 create mode 100644 pkg/commands/cache.go
 create mode 100644 pkg/commands/cache_test.go

diff --git a/pkg/commands/cache.go b/pkg/commands/cache.go
new file mode 100644
index 000000000..1d37e6251
--- /dev/null
+++ b/pkg/commands/cache.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package commands
+
+import v1 "github.com/google/go-containerregistry/pkg/v1"
+
+type Cached interface {
+	Layer() v1.Layer
+	ReadSuccess() bool
+}
+
+type caching struct {
+	layer       v1.Layer
+	readSuccess bool
+}
+
+func (c caching) Layer() v1.Layer {
+	return c.layer
+}
+
+func (c caching) ReadSuccess() bool {
+	return c.readSuccess
+}
diff --git a/pkg/commands/cache_test.go b/pkg/commands/cache_test.go
new file mode 100644
index 000000000..b6201af83
--- /dev/null
+++ b/pkg/commands/cache_test.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package commands + +import ( + "testing" +) + +func Test_caching(t *testing.T) { + c := caching{layer: fakeLayer{}, readSuccess: true} + + actual := c.Layer().(fakeLayer) + expected := fakeLayer{} + actualLen, expectedLen := len(actual.TarContent), len(expected.TarContent) + if actualLen != expectedLen { + t.Errorf("expected layer tar content to be %v but was %v", expectedLen, actualLen) + } + + if !c.ReadSuccess() { + t.Errorf("expected ReadSuccess to be %v but was %v", true, c.ReadSuccess()) + } +} diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index 9b3113cd7..75cf7d1f5 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -153,6 +153,7 @@ func (c *CopyCommand) From() string { type CachingCopyCommand struct { BaseCommand + caching img v1.Image extractedFiles []string cmd *instructions.CopyCommand @@ -173,6 +174,13 @@ func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *docke return errors.Wrapf(err, "retrieve image layers") } + if len(layers) != 1 { + return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers))) + } + + cr.layer = layers[0] + cr.readSuccess = true + cr.extractedFiles, err = util.GetFSFromLayers(RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout()) logrus.Infof("extractedFiles: %s", cr.extractedFiles) diff --git a/pkg/commands/copy_test.go b/pkg/commands/copy_test.go index 4e0f3533f..729f897a5 100755 --- a/pkg/commands/copy_test.go +++ b/pkg/commands/copy_test.go @@ -305,14 +305,14 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { c := &CachingCopyCommand{ img: fakeImage{}, } - tc := testCase{ - desctiption: "with image containing no layers", - } c.extractFn = func(_ string, _ *tar.Header, _ io.Reader) error { return nil } - tc.command = c - return tc + return testCase{ + desctiption: "with image containing no layers", + expectErr: true, + command: c, + } }(), func() testCase { c := &CachingCopyCommand{ @@ -375,6 +375,15 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { } } + if c.layer == nil && tc.expectLayer { + t.Error("expected the command to have a layer set but instead was nil") + } else if c.layer != nil && !tc.expectLayer { + t.Error("expected the command to have no layer set but instead found a layer") + } + + if c.readSuccess != tc.expectLayer { + t.Errorf("expected read success to be %v but was %v", tc.expectLayer, c.readSuccess) + } }) } } diff --git a/pkg/commands/run.go b/pkg/commands/run.go index 1687000c4..baa0bff09 100644 --- a/pkg/commands/run.go +++ b/pkg/commands/run.go @@ -184,6 +184,7 @@ func (r *RunCommand) ShouldCacheOutput() bool { type CachingRunCommand struct { BaseCommand + caching img v1.Image extractedFiles []string cmd *instructions.RunCommand @@ -203,6 +204,13 @@ func (cr *CachingRunCommand) ExecuteCommand(config *v1.Config, buildArgs *docker return errors.Wrap(err, "retrieving image layers") } + if len(layers) != 1 { + return errors.New(fmt.Sprintf("expected %d layers but got %d", 1, len(layers))) + } + + cr.layer = layers[0] + cr.readSuccess = true + cr.extractedFiles, err = util.GetFSFromLayers( constants.RootDir, layers, diff --git a/pkg/commands/run_test.go b/pkg/commands/run_test.go index 36ae98bad..7e4984eb1 100644 --- a/pkg/commands/run_test.go +++ b/pkg/commands/run_test.go @@ -238,14 +238,16 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { c := &CachingRunCommand{ img: fakeImage{}, } - tc := testCase{ - desctiption: "with image containing no layers", - } + c.extractFn = func(_ string, _ *tar.Header, _ 
io.Reader) error { return nil } - tc.command = c - return tc + + return testCase{ + desctiption: "with image containing no layers", + expectErr: true, + command: c, + } }(), func() testCase { c := &CachingRunCommand{ @@ -310,6 +312,16 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) { t.Errorf("expected files used from context to be empty but was not") } } + + if c.layer == nil && tc.expectLayer { + t.Error("expected the command to have a layer set but instead was nil") + } else if c.layer != nil && !tc.expectLayer { + t.Error("expected the command to have no layer set but instead found a layer") + } + + if c.readSuccess != tc.expectLayer { + t.Errorf("expected read success to be %v but was %v", tc.expectLayer, c.readSuccess) + } }) } } diff --git a/pkg/executor/build.go b/pkg/executor/build.go index d1c17cf1d..62407848f 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -326,29 +326,47 @@ func (s *stageBuilder) build() error { continue } - tarPath, err := s.takeSnapshot(files) - if err != nil { - return errors.Wrap(err, "failed to take snapshot") + fn := func() bool { + switch v := command.(type) { + case commands.Cached: + return v.ReadSuccess() + default: + return false + } } - logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey) - ck, err := compositeKey.Hash() - if err != nil { - return errors.Wrap(err, "failed to hash composite key") - } + if fn() { + v := command.(commands.Cached) + layer := v.Layer() + if err := s.saveLayerToImage(layer, command.String()); err != nil { + return err + } + } else { + tarPath, err := s.takeSnapshot(files) + if err != nil { + return errors.Wrap(err, "failed to take snapshot") + } - logrus.Debugf("build: cache key for command %v %v", command.String(), ck) + logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey) + ck, err := compositeKey.Hash() + if err != nil { + return errors.Wrap(err, "failed to hash composite key") + } - // Push layer to cache (in parallel) now along with new config file - if s.opts.Cache && command.ShouldCacheOutput() { - cacheGroup.Go(func() error { - return s.pushCache(s.opts, ck, tarPath, command.String()) - }) - } - if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil { - return errors.Wrap(err, "failed to save snapshot to image") + logrus.Debugf("build: cache key for command %v %v", command.String(), ck) + + // Push layer to cache (in parallel) now along with new config file + if s.opts.Cache && command.ShouldCacheOutput() { + cacheGroup.Go(func() error { + return s.pushCache(s.opts, ck, tarPath, command.String()) + }) + } + if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil { + return errors.Wrap(err, "failed to save snapshot to image") + } } } + if err := cacheGroup.Wait(); err != nil { logrus.Warnf("error uploading layer to cache: %s", err) } @@ -398,22 +416,40 @@ func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool { } func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error { - if tarPath == "" { + layer, err := s.saveSnapshotToLayer(tarPath) + if err != nil { + return err + } + + if layer == nil { return nil } + + return s.saveLayerToImage(layer, createdBy) +} + +func (s *stageBuilder) saveSnapshotToLayer(tarPath string) (v1.Layer, error) { + if tarPath == "" { + return nil, nil + } fi, err := os.Stat(tarPath) if err != nil { - return errors.Wrap(err, "tar file path does not exist") + return nil, errors.Wrap(err, "tar file path does not exist") } 
if fi.Size() <= emptyTarSize { logrus.Info("No files were changed, appending empty layer to config. No layer added to image.") - return nil + return nil, nil } layer, err := tarball.LayerFromFile(tarPath) if err != nil { - return err + return nil, err } + + return layer, nil +} +func (s *stageBuilder) saveLayerToImage(layer v1.Layer, createdBy string) error { + var err error s.image, err = mutate.Append(s.image, mutate.Addendum{ Layer: layer, From 5951d9b0eeb8e1c9507d3581d6542440e037d332 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 24 Jan 2020 15:54:44 -0800 Subject: [PATCH 02/37] add /tmp/apt-key to whitelist for Dockerfiles which use command --- pkg/snapshot/layered_map.go | 9 ++++++++- pkg/util/fs_util.go | 19 +++++++++++++++---- pkg/util/fs_util_test.go | 8 ++++++++ 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/pkg/snapshot/layered_map.go b/pkg/snapshot/layered_map.go index 56e8da4f0..0097b57a5 100644 --- a/pkg/snapshot/layered_map.go +++ b/pkg/snapshot/layered_map.go @@ -20,11 +20,13 @@ import ( "bytes" "encoding/json" "fmt" + "os" "path/filepath" "strings" "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/sirupsen/logrus" ) type LayeredMap struct { @@ -113,13 +115,18 @@ func (l *LayeredMap) Add(s string) error { // from the current layered map by its hashing function. // Returns true if the file is changed. func (l *LayeredMap) CheckFileChange(s string) (bool, error) { - oldV, ok := l.Get(s) t := timing.Start("Hashing files") defer timing.DefaultRun.Stop(t) newV, err := l.hasher(s) if err != nil { + // if this file does not exist in the new layer return. + if os.IsNotExist(err) { + logrus.Tracef("%s detected as changed but does not exist", s) + return false, nil + } return false, err } + oldV, ok := l.Get(s) if ok && newV == oldV { return false, nil } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 7aa91adff..7cadce1a3 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -26,6 +26,7 @@ import ( "net/http" "os" "path/filepath" + "regexp" "strings" "syscall" "time" @@ -61,6 +62,12 @@ var initialWhitelist = []WhitelistEntry{ Path: "/etc/mtab", PrefixMatchOnly: false, }, + { + // we whitelist /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory. 
+ // from the base image + Path: "/tmp/apt-key-gpghome", + PrefixMatchOnly: true, + }, } var whitelist = initialWhitelist @@ -674,7 +681,7 @@ func excludeFile(path, buildcontext string) bool { return match } -// HasFilepathPrefix checks if the given file path begins with prefix +// HasFilepathPrefix checks if the given file path begins with prefix func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool { prefix = filepath.Clean(prefix) prefixArray := strings.Split(prefix, "/") @@ -687,11 +694,15 @@ func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool { if prefixMatchOnly && len(pathArray) == len(prefixArray) { return false } + for index := range prefixArray { - if prefixArray[index] == pathArray[index] { - continue + m, err := regexp.MatchString(prefixArray[index], pathArray[index]) + if err != nil { + return false + } + if !m { + return false } - return false } return true } diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 2919426ce..ac5568863 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -259,6 +259,14 @@ func Test_CheckWhitelist(t *testing.T) { }, want: false, }, + { + name: "prefix match only ", + args: args{ + path: "/tmp/apt-key-gpghome.xft/gpg.key", + whitelist: []WhitelistEntry{{"/tmp/apt-key-gpghome.*", true}}, + }, + want: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From e0322042c715ea6b8e85764a36ba7e029e3d53c5 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 24 Jan 2020 22:12:37 -0800 Subject: [PATCH 03/37] use filepath.Match instead of regex --- pkg/util/fs_util.go | 3 +-- pkg/util/fs_util_test.go | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 7cadce1a3..0e03fa5fc 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -26,7 +26,6 @@ import ( "net/http" "os" "path/filepath" - "regexp" "strings" "syscall" "time" @@ -696,7 +695,7 @@ func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool { } for index := range prefixArray { - m, err := regexp.MatchString(prefixArray[index], pathArray[index]) + m, err := filepath.Match(prefixArray[index], pathArray[index]) if err != nil { return false } diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index ac5568863..d3640553d 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -66,6 +66,7 @@ func Test_DetectFilesystemWhitelist(t *testing.T) { {"/sys", false}, {"/var/run", false}, {"/etc/mtab", false}, + {"/tmp/apt-key-gpghome", true}, } actualWhitelist := whitelist sort.Slice(actualWhitelist, func(i, j int) bool { From 2f1e54e5919d3f30f26bba61ca3d3a3e6b6c819d Mon Sep 17 00:00:00 2001 From: Ben Einaudi Date: Wed, 22 Jan 2020 16:10:28 +0100 Subject: [PATCH 04/37] Expand build argument from environment when no value specified Fixes #713 --- cmd/executor/cmd/root.go | 12 ++++ cmd/executor/cmd/root_test.go | 52 ++++++++++++++++ .../dockerfiles/Dockerfile_test_arg_secret | 10 +++ integration/images.go | 62 ++++++++++++++++--- 4 files changed, 129 insertions(+), 7 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_arg_secret diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 7f56e2e71..1d3717298 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -55,6 +55,7 @@ var RootCmd = &cobra.Command{ Use: "executor", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if cmd.Use == "executor" { + resolveEnvironmentBuildArgs(opts.BuildArgs, 
os.Getenv) if err := util.ConfigureLogging(logLevel); err != nil { return err } @@ -200,6 +201,17 @@ func resolveDockerfilePath() error { return errors.New("please provide a valid path to a Dockerfile within the build context with --dockerfile") } +// resolveEnvironmentBuildArgs replace build args without value by the same named environment variable +func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) string) { + for index, argument := range arguments { + i := strings.Index(argument, "=") + if i < 0 { + value := resolver(argument) + arguments[index] = fmt.Sprintf("%s=%s", argument, value) + } + } +} + // copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore // it won't be copied into the image func copyDockerfile() error { diff --git a/cmd/executor/cmd/root_test.go b/cmd/executor/cmd/root_test.go index cec6dff69..79e8ea122 100644 --- a/cmd/executor/cmd/root_test.go +++ b/cmd/executor/cmd/root_test.go @@ -97,3 +97,55 @@ func TestIsUrl(t *testing.T) { }) } } + +func TestResolveEnvironmentBuildArgs(t *testing.T) { + tests := []struct { + description string + input []string + expected []string + mockedEnvironmentResolver func(string) string + }{ + { + description: "replace when environment variable is present and value is not specified", + input: []string{"variable1"}, + expected: []string{"variable1=value1"}, + mockedEnvironmentResolver: func(variable string) string { + if variable == "variable1" { + return "value1" + } + return "" + }, + }, + { + description: "do not replace when environment variable is present and value is specified", + input: []string{"variable1=value1", "variable2=value2"}, + expected: []string{"variable1=value1", "variable2=value2"}, + mockedEnvironmentResolver: func(variable string) string { + return "unexpected" + }, + }, + { + description: "do not replace when environment variable is present and empty value is specified", + input: []string{"variable1="}, + expected: []string{"variable1="}, + mockedEnvironmentResolver: func(variable string) string { + return "unexpected" + }, + }, + { + description: "replace with empty value when environment variable is not present or empty and value is not specified", + input: []string{"variable1", "variable2=value2"}, + expected: []string{"variable1=", "variable2=value2"}, + mockedEnvironmentResolver: func(variable string) string { + return "" + }, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + resolveEnvironmentBuildArgs(tt.input, tt.mockedEnvironmentResolver) + testutil.CheckDeepEqual(t, tt.expected, tt.input) + }) + } +} diff --git a/integration/dockerfiles/Dockerfile_test_arg_secret b/integration/dockerfiles/Dockerfile_test_arg_secret new file mode 100644 index 000000000..4429f938c --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_arg_secret @@ -0,0 +1,10 @@ +FROM debian:9.11 + +ARG SSH_PRIVATE_KEY +ARG SSH_PUBLIC_KEY + +RUN mkdir .ssh && \ + chmod 700 .ssh && \ + echo "$SSH_PRIVATE_KEY" > .ssh/id_rsa && \ + echo "$SSH_PUBLIC_KEY" > .ssh/id_rsa.pub && \ + chmod 600 .ssh/id_rsa .ssh/id_rsa.pub diff --git a/integration/images.go b/integration/images.go index f4df7af49..13bf6d7ec 100644 --- a/integration/images.go +++ b/integration/images.go @@ -17,6 +17,7 @@ limitations under the License. 
package integration import ( + "bytes" "fmt" "io/ioutil" "os" @@ -46,10 +47,11 @@ const ( // Arguments to build Dockerfiles with, used for both docker and kaniko builds var argsMap = map[string][]string{ - "Dockerfile_test_run": {"file=/file"}, - "Dockerfile_test_workdir": {"workdir=/arg/workdir"}, - "Dockerfile_test_add": {"file=context/foo"}, - "Dockerfile_test_onbuild": {"file=/tmp/onbuild"}, + "Dockerfile_test_run": {"file=/file"}, + "Dockerfile_test_workdir": {"workdir=/arg/workdir"}, + "Dockerfile_test_add": {"file=context/foo"}, + "Dockerfile_test_arg_secret": {"SSH_PRIVATE_KEY", "SSH_PUBLIC_KEY=Pµbl1cK€Y"}, + "Dockerfile_test_onbuild": {"file=/tmp/onbuild"}, "Dockerfile_test_scratch": { "image=scratch", "hello=hello-value", @@ -59,6 +61,11 @@ var argsMap = map[string][]string{ "Dockerfile_test_multistage": {"file=/foo2"}, } +// Environment to build Dockerfiles with, used for both docker and kaniko builds +var envsMap = map[string][]string{ + "Dockerfile_test_arg_secret": {"SSH_PRIVATE_KEY=ThEPriv4t3Key"}, +} + // Arguments to build Dockerfiles with when building with docker var additionalDockerFlagsMap = map[string][]string{ "Dockerfile_test_target": {"--target=second"}, @@ -71,6 +78,36 @@ var additionalKanikoFlagsMap = map[string][]string{ "Dockerfile_test_target": {"--target=second"}, } +// output check to do when building with kaniko +var outputChecks = map[string]func(string, []byte) error{ + "Dockerfile_test_arg_secret": checkArgsNotPrinted, +} + +// Checks if argument are not printed in output. +// Argument may be passed through --build-arg key=value manner or --build-arg key with key in environment +func checkArgsNotPrinted(dockerfile string, out []byte) error { + for _, arg := range argsMap[dockerfile] { + argSplitted := strings.Split(arg, "=") + if len(argSplitted) == 2 { + if idx := bytes.Index(out, []byte(argSplitted[1])); idx >= 0 { + return fmt.Errorf("Argument value %s for argument %s displayed in output", argSplitted[1], argSplitted[0]) + } + } else if len(argSplitted) == 1 { + if envs, ok := envsMap[dockerfile]; ok { + for _, env := range envs { + envSplitted := strings.Split(env, "=") + if len(envSplitted) == 2 { + if idx := bytes.Index(out, []byte(envSplitted[1])); idx >= 0 { + return fmt.Errorf("Argument value %s for argument %s displayed in output", envSplitted[1], argSplitted[0]) + } + } + } + } + } + } + return nil +} + var bucketContextTests = []string{"Dockerfile_test_copy_bucket"} var reproducibleTests = []string{"Dockerfile_test_reproducible"} @@ -164,8 +201,7 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke var buildArgs []string buildArgFlag := "--build-arg" for _, arg := range argsMap[dockerfile] { - buildArgs = append(buildArgs, buildArgFlag) - buildArgs = append(buildArgs, arg) + buildArgs = append(buildArgs, buildArgFlag, arg) } // build docker image additionalFlags := append(buildArgs, additionalDockerFlagsMap[dockerfile]...) @@ -177,6 +213,9 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke "."}, additionalFlags...)..., ) + if env, ok := envsMap[dockerfile]; ok { + dockerCmd.Env = append(dockerCmd.Env, env...) 
+ } timer := timing.Start(dockerfile + "_docker") out, err := RunCommandWithoutTest(dockerCmd) @@ -225,7 +264,11 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke "-v", cwd + ":/workspace", "-v", benchmarkDir + ":/kaniko/benchmarks", } - + if env, ok := envsMap[dockerfile]; ok { + for _, envVariable := range env { + dockerRunFlags = append(dockerRunFlags, "-e", envVariable) + } + } dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) dockerRunFlags = append(dockerRunFlags, ExecutorImage, @@ -242,6 +285,11 @@ func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, docke if err != nil { return fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", dockerImage, kanikoCmd.Args, err, string(out)) } + if outputCheck := outputChecks[dockerfile]; outputCheck != nil { + if err := outputCheck(dockerfile, out); err != nil { + return fmt.Errorf("Output check failed for image %s with kaniko command \"%s\": %s %s", dockerImage, kanikoCmd.Args, err, string(out)) + } + } d.FilesBuilt[dockerfile] = true return nil From 6b6742fd9dedcb59d0bbe413affdf9fe739a892d Mon Sep 17 00:00:00 2001 From: Thomas Bonfort Date: Fri, 31 Jan 2020 12:02:32 +0100 Subject: [PATCH 05/37] Changle loglevel for whiteouts to debug --- pkg/snapshot/snapshot.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 3e37d5a61..80b5c5ce0 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -161,7 +161,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { dir := filepath.Dir(path) if _, ok := memFs[dir]; ok { if s.l.MaybeAddWhiteout(path) { - logrus.Infof("Adding whiteout for %s", path) + logrus.Debugf("Adding whiteout for %s", path) filesToWhiteOut = append(filesToWhiteOut, path) } } From 5a4a6c4a17dd694fc7a52c6ffab4ecf4dc2faff9 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 31 Jan 2020 15:23:38 -0800 Subject: [PATCH 06/37] fix test flake --- pkg/util/fs_util_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 1b714fb59..9b8d9972c 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -1300,6 +1300,12 @@ func TestUpdateWhitelist(t *testing.T) { whitelist = initialWhitelist defer func() { whitelist = initialWhitelist }() UpdateWhitelist(tt.whitelistVarRun) + sort.Slice(tt.expected, func(i, j int) bool { + return tt.expected[i].Path < tt.expected[j].Path + }) + sort.Slice(whitelist, func(i, j int) bool { + return whitelist[i].Path < whitelist[j].Path + }) testutil.CheckDeepEqual(t, tt.expected, whitelist) }) } From 0e833e46437489e15820266a1aaefcbad61884c0 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Sat, 1 Feb 2020 09:29:18 -0800 Subject: [PATCH 07/37] fix test --- pkg/util/fs_util_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 9b8d9972c..e2529ed97 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -1279,6 +1279,10 @@ func TestUpdateWhitelist(t *testing.T) { Path: "/var/run", PrefixMatchOnly: false, }, + { + Path: "/tmp/apt-key-gpghome", + PrefixMatchOnly: true, + }, }, }, { @@ -1292,6 +1296,10 @@ func TestUpdateWhitelist(t *testing.T) { Path: "/etc/mtab", PrefixMatchOnly: false, }, + { + Path: "/tmp/apt-key-gpghome", + PrefixMatchOnly: true, + }, }, }, } From 956495784e8b14dea3a80b0d028770f5027d38b0 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: 
Fri, 31 Jan 2020 15:19:57 -0800 Subject: [PATCH 08/37] fix group string being always set to uid in case a user has a gid set --- pkg/commands/user.go | 11 ++++++++++- pkg/commands/user_test.go | 20 +++++++++++++++++++- pkg/util/command_util.go | 15 +++++++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) diff --git a/pkg/commands/user.go b/pkg/commands/user.go index f79e1bd60..35568f74d 100644 --- a/pkg/commands/user.go +++ b/pkg/commands/user.go @@ -26,6 +26,11 @@ import ( "github.com/sirupsen/logrus" ) +// for testing +var ( + Lookup = util.Lookup +) + type UserCommand struct { BaseCommand cmd *instructions.UserCommand @@ -40,7 +45,11 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu if err != nil { return err } - groupStr := userStr + userObj, err := Lookup(userStr) + if err != nil { + return err + } + groupStr := userObj.Gid if len(userAndGroup) > 1 { groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false) if err != nil { diff --git a/pkg/commands/user_test.go b/pkg/commands/user_test.go index 916f98c0f..a0db5decb 100644 --- a/pkg/commands/user_test.go +++ b/pkg/commands/user_test.go @@ -16,9 +16,11 @@ limitations under the License. package commands import ( + "os/user" "testing" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/GoogleContainerTools/kaniko/testutil" v1 "github.com/google/go-containerregistry/pkg/v1" @@ -27,52 +29,64 @@ import ( var userTests = []struct { user string + userObj user.User expectedUID string expectedGID string }{ { user: "root", + userObj: user.User{Uid: "root", Gid: "root"}, expectedUID: "root:root", }, { user: "root-add", - expectedUID: "root-add:root-add", + userObj: user.User{Uid: "root-add", Gid: "root"}, + expectedUID: "root-add:root", }, { user: "0", + userObj: user.User{Uid: "0", Gid: "0"}, expectedUID: "0:0", }, { user: "fakeUser", + userObj: user.User{Uid: "fakeUser", Gid: "fakeUser"}, expectedUID: "fakeUser:fakeUser", }, { user: "root:root", + userObj: user.User{Uid: "root", Gid: "some"}, expectedUID: "root:root", }, { user: "0:root", + userObj: user.User{Uid: "0"}, expectedUID: "0:root", }, { user: "root:0", + userObj: user.User{Uid: "root"}, expectedUID: "root:0", expectedGID: "f0", }, { user: "0:0", + userObj: user.User{Uid: "0"}, expectedUID: "0:0", }, { user: "$envuser", + userObj: user.User{Uid: "root", Gid: "root"}, expectedUID: "root:root", }, { user: "root:$envgroup", + userObj: user.User{Uid: "root"}, expectedUID: "root:grp", }, { user: "some:grp", + userObj: user.User{Uid: "some"}, expectedUID: "some:grp", }, } @@ -90,6 +104,10 @@ func TestUpdateUser(t *testing.T) { User: test.user, }, } + Lookup = func(_ string) (*user.User, error) { + return &test.userObj, nil + } + defer func() { Lookup = util.Lookup }() buildArgs := dockerfile.NewBuildArgs([]string{}) err := cmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedUID, cfg.User) diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index 6f391e133..3fdcf7d19 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -363,3 +363,18 @@ func GetUserFromUsername(userStr string, groupStr string) (string, string, error return uid, gid, nil } + +func Lookup(userStr string) (*user.User, error) { + userObj, err := user.Lookup(userStr) + if err != nil { + if _, ok := err.(user.UnknownUserError); !ok { + return nil, err + } + // Lookup by id + userObj, err = user.LookupId(userStr) + 
if err != nil { + return nil, err + } + } + return userObj, nil +} From 6894be442fefa7d90f6195b4c187fd46ae3f3163 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 31 Jan 2020 15:32:52 -0800 Subject: [PATCH 09/37] remove duplicate code --- pkg/util/command_util.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index 3fdcf7d19..cadf6e564 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -328,16 +328,9 @@ Loop: func GetUserFromUsername(userStr string, groupStr string) (string, string, error) { // Lookup by username - userObj, err := user.Lookup(userStr) + userObj, err := Lookup(userStr) if err != nil { - if _, ok := err.(user.UnknownUserError); !ok { - return "", "", err - } - // Lookup by id - userObj, err = user.LookupId(userStr) - if err != nil { - return "", "", err - } + return "", "", err } // Same dance with groups From db6f6c5ad3d6c74369df4d3ba9fcdb521db8a44a Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 3 Feb 2020 10:11:40 -0800 Subject: [PATCH 10/37] more tests and do not error out if user not found --- pkg/commands/user.go | 21 ++++++++++++++------- pkg/commands/user_test.go | 34 +++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/pkg/commands/user.go b/pkg/commands/user.go index 35568f74d..d1e4ceb94 100644 --- a/pkg/commands/user.go +++ b/pkg/commands/user.go @@ -17,6 +17,8 @@ limitations under the License. package commands import ( + "fmt" + "github.com/pkg/errors" "strings" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" @@ -43,17 +45,13 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu replacementEnvs := buildArgs.ReplacementEnvs(config.Env) userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false) if err != nil { - return err + return errors.Wrap(err, fmt.Sprintf("resolving user %s", userAndGroup[0])) } - userObj, err := Lookup(userStr) - if err != nil { - return err - } - groupStr := userObj.Gid + var groupStr = setGroupDefault(userStr) if len(userAndGroup) > 1 { groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false) if err != nil { - return err + return errors.Wrap(err, fmt.Sprintf("resolving group %s", userAndGroup[1])) } } @@ -66,3 +64,12 @@ func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu func (r *UserCommand) String() string { return r.cmd.String() } + +func setGroupDefault(userStr string) string { + userObj, err := Lookup(userStr) + if err != nil { + logrus.Debugf("could not lookup user %s. Setting group empty", userStr) + return "" + } + return userObj.Gid +} diff --git a/pkg/commands/user_test.go b/pkg/commands/user_test.go index a0db5decb..fc90e2ca9 100644 --- a/pkg/commands/user_test.go +++ b/pkg/commands/user_test.go @@ -16,6 +16,7 @@ limitations under the License. 
package commands import ( + "fmt" "os/user" "testing" @@ -29,66 +30,70 @@ import ( var userTests = []struct { user string - userObj user.User + userObj *user.User expectedUID string expectedGID string }{ { user: "root", - userObj: user.User{Uid: "root", Gid: "root"}, + userObj: &user.User{Uid: "root", Gid: "root"}, expectedUID: "root:root", }, { user: "root-add", - userObj: user.User{Uid: "root-add", Gid: "root"}, + userObj: &user.User{Uid: "root-add", Gid: "root"}, expectedUID: "root-add:root", }, { user: "0", - userObj: user.User{Uid: "0", Gid: "0"}, + userObj: &user.User{Uid: "0", Gid: "0"}, expectedUID: "0:0", }, { user: "fakeUser", - userObj: user.User{Uid: "fakeUser", Gid: "fakeUser"}, + userObj: &user.User{Uid: "fakeUser", Gid: "fakeUser"}, expectedUID: "fakeUser:fakeUser", }, { user: "root:root", - userObj: user.User{Uid: "root", Gid: "some"}, + userObj: &user.User{Uid: "root", Gid: "some"}, expectedUID: "root:root", }, { user: "0:root", - userObj: user.User{Uid: "0"}, + userObj: &user.User{Uid: "0"}, expectedUID: "0:root", }, { user: "root:0", - userObj: user.User{Uid: "root"}, + userObj: &user.User{Uid: "root"}, expectedUID: "root:0", expectedGID: "f0", }, { user: "0:0", - userObj: user.User{Uid: "0"}, + userObj: &user.User{Uid: "0"}, expectedUID: "0:0", }, { user: "$envuser", - userObj: user.User{Uid: "root", Gid: "root"}, + userObj: &user.User{Uid: "root", Gid: "root"}, expectedUID: "root:root", }, { user: "root:$envgroup", - userObj: user.User{Uid: "root"}, + userObj: &user.User{Uid: "root"}, expectedUID: "root:grp", }, { user: "some:grp", - userObj: user.User{Uid: "some"}, + userObj: &user.User{Uid: "some"}, expectedUID: "some:grp", }, + { + user: "some", + expectedUID: "some:", + }, } func TestUpdateUser(t *testing.T) { @@ -105,7 +110,10 @@ func TestUpdateUser(t *testing.T) { }, } Lookup = func(_ string) (*user.User, error) { - return &test.userObj, nil + if test.userObj != nil { + return test.userObj, nil + } + return nil, fmt.Errorf("error while looking up user.") } defer func() { Lookup = util.Lookup }() buildArgs := dockerfile.NewBuildArgs([]string{}) From 18fec3ba38d3c64761f2ea4bfc58701d98156c15 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 3 Feb 2020 11:03:58 -0800 Subject: [PATCH 11/37] fix lint --- pkg/commands/user.go | 2 +- pkg/commands/user_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/commands/user.go b/pkg/commands/user.go index d1e4ceb94..702e5594d 100644 --- a/pkg/commands/user.go +++ b/pkg/commands/user.go @@ -18,13 +18,13 @@ package commands import ( "fmt" - "github.com/pkg/errors" "strings" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/pkg/util" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) diff --git a/pkg/commands/user_test.go b/pkg/commands/user_test.go index fc90e2ca9..455295f7c 100644 --- a/pkg/commands/user_test.go +++ b/pkg/commands/user_test.go @@ -113,7 +113,7 @@ func TestUpdateUser(t *testing.T) { if test.userObj != nil { return test.userObj, nil } - return nil, fmt.Errorf("error while looking up user.") + return nil, fmt.Errorf("error while looking up user") } defer func() { Lookup = util.Lookup }() buildArgs := dockerfile.NewBuildArgs([]string{}) From 42b1b68dfffdd4c203d4e101a8c142d0a92b480f Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 3 Feb 2020 09:36:02 -0800 Subject: [PATCH 12/37] Release v0.17.0 --- CHANGELOG.md | 39 
+++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fdbfa0e86..307d76722 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,42 @@
+# v0.17.0 Release - 2020-02-03
+
+## New Features
+* Expand build argument from environment when no value specified [#993](https://github.com/GoogleContainerTools/kaniko/pull/993)
+* whitelist /tmp/apt-key-gpghome.* directory [#1000](https://github.com/GoogleContainerTools/kaniko/pull/1000)
+* Add flag to `--whitelist-var-run` set to true to preserve default kani… [#1011](https://github.com/GoogleContainerTools/kaniko/pull/1011)
+* Prefer platform that is currently running for pulling remote images and kaniko binary Makefile target [#980](https://github.com/GoogleContainerTools/kaniko/pull/980)
+
+## Bug Fixes
+* Fix caching to respect .dockerignore [#854](https://github.com/GoogleContainerTools/kaniko/pull/854)
+* Fixes #988 run_in_docker.sh only works with gcr.io [#990](https://github.com/GoogleContainerTools/kaniko/pull/990)
+* Fix Symlinks not being copied across stages [#971](https://github.com/GoogleContainerTools/kaniko/pull/971)
+* Fix home and group set for user command [#995](https://github.com/GoogleContainerTools/kaniko/pull/995)
+* Fix COPY or ADD to symlink destination breaks image [#943](https://github.com/GoogleContainerTools/kaniko/pull/943)
+* [Caching] Fix bug with deleted files and cached run and copy commands
+* [Multistage Build] Fix bug with capital letter in stage names [#983](https://github.com/GoogleContainerTools/kaniko/pull/983)
+* Fix #940 set modtime when extracting [#981](https://github.com/GoogleContainerTools/kaniko/pull/981)
+* Fix Ability for ADD to unTar a file [#792](https://github.com/GoogleContainerTools/kaniko/pull/792)
+
+## Updates and Refactors
+* fix test flake [#1016](https://github.com/GoogleContainerTools/kaniko/pull/1016)
+* Upgrade go-containerregistry third-party library [#957](https://github.com/GoogleContainerTools/kaniko/pull/957)
+* Remove debug tag being built for every push to master [#1004](https://github.com/GoogleContainerTools/kaniko/pull/1004)
+* Run integration tests in Travis CI [#979](https://github.com/GoogleContainerTools/kaniko/pull/979)
+
+
+Huge thank you for this release towards our contributors:
+- Anthony Davies
+- Ben Einaudi
+- Cole Wippern
+- cvgw
+- Logan.Price
+- Moritz Wanzenböck
+- ohchang-kwon
+- Sam Stoelinga
+- Tejal Desai
+- Thomas Bonfort
+- Wietse Muizelaar
+
 # v0.16.0 Release - 2020-01-17
 
 Happy New Year 2020!
From ce00eaed3c5622ac3f20ebdbd12bce025c1c0a17 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 3 Feb 2020 13:17:02 -0800 Subject: [PATCH 13/37] bump release --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 132f992dd..f5442ac56 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ # Bump these on release VERSION_MAJOR ?= 0 -VERSION_MINOR ?= 16 +VERSION_MINOR ?= 17 VERSION_BUILD ?= 0 VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) From e3b5a7b85daa2209e697c82f44d41bff3966de00 Mon Sep 17 00:00:00 2001 From: xanonid Date: Fri, 10 Jan 2020 18:06:32 +0100 Subject: [PATCH 14/37] Support COPY --chown flag (Closes: #9) --- cmd/executor/cmd/root.go | 2 +- pkg/commands/copy.go | 20 ++++++++++++++++++-- pkg/commands/run.go | 16 ++-------------- pkg/util/command_util.go | 19 ++++++++++++++++++- pkg/util/fs_util.go | 37 ++++++++++++++++++++++++------------- pkg/util/fs_util_test.go | 2 +- 6 files changed, 64 insertions(+), 32 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 1d3717298..937e7f4a4 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -215,7 +215,7 @@ func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) strin // copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore // it won't be copied into the image func copyDockerfile() error { - if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, ""); err != nil { + if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, "", -1, -1); err != nil { return errors.Wrap(err, "copying dockerfile") } opts.DockerfilePath = constants.DockerfilePath diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index d94f3e10f..7af0b34f1 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -44,9 +44,25 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu if c.cmd.From != "" { c.buildcontext = filepath.Join(constants.KanikoDir, c.cmd.From) } + var uid, gid int64 + uid = -1 + gid = -1 replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + if c.cmd.Chown != "" { + chown, err := util.ResolveEnvironmentReplacement(c.cmd.Chown, replacementEnvs, false) + if err != nil { + return err + } + uid32, gid32, err := util.GetUIDAndGIDFromString(chown, true) + uid = int64(uid32) + gid = int64(gid32) + if err != nil { + return err + } + } + srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) if err != nil { return err @@ -80,7 +96,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu } if fi.IsDir() { - copiedFiles, err := util.CopyDir(fullPath, destPath, c.buildcontext) + copiedFiles, err := util.CopyDir(fullPath, destPath, c.buildcontext, uid, gid) if err != nil { return err } @@ -97,7 +113,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu c.snapshotFiles = append(c.snapshotFiles, destPath) } else { // ... 
Else, we want to copy over a file - exclude, err := util.CopyFile(fullPath, destPath, c.buildcontext) + exclude, err := util.CopyFile(fullPath, destPath, c.buildcontext, uid, gid) if err != nil { return err } diff --git a/pkg/commands/run.go b/pkg/commands/run.go index 108cdc5f6..73ea4ca94 100644 --- a/pkg/commands/run.go +++ b/pkg/commands/run.go @@ -21,7 +21,6 @@ import ( "os" "os/exec" "os/user" - "strconv" "strings" "syscall" @@ -80,24 +79,13 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui } uidStr, gidStr, err := util.GetUserFromUsername(userStr, groupStr) - if err != nil { + if err !=nil{ return err } - - // uid and gid need to be uint32 - uid64, err := strconv.ParseUint(uidStr, 10, 32) + uid, gid, err := util.GetUIDAndGIDFromString(uidStr, gidStr) if err != nil { return err } - uid := uint32(uid64) - var gid uint32 - if gidStr != "" { - gid64, err := strconv.ParseUint(gidStr, 10, 32) - if err != nil { - return err - } - gid = uint32(gid64) - } cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid} } cmd.Env = addDefaultHOME(userStr, replacementEnvs) diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index cadf6e564..2121f4055 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -23,6 +23,7 @@ import ( "os" "os/user" "path/filepath" + "strconv" "strings" "github.com/GoogleContainerTools/kaniko/pkg/constants" @@ -326,6 +327,22 @@ Loop: return nil } +// Extract user and group id from a string formatted 'user:group'. +// If gidEqualToUIDIfNotGiven is set, the gid is equal to uid if the group is not specified +// otherwise gid is set to zero. +func GetUIDAndGIDFromString(uidStr string, gidStr string) (uint32, uint32, error) { + // uid and gid need to be fit into uint32 + uid64, err := strconv.ParseUint(uidStr, 10, 32) + if err != nil { + return 0, 0, err + } + gid64, err := strconv.ParseUint(gidStr, 10, 32) + if err != nil { + return 0, 0, err + } + return uint32(uid64), uint32(gid64), nil +} + func GetUserFromUsername(userStr string, groupStr string) (string, string, error) { // Lookup by username userObj, err := Lookup(userStr) @@ -349,7 +366,7 @@ func GetUserFromUsername(userStr string, groupStr string) (string, string, error } uid := userObj.Uid - gid := "" + gid := userObj.Gid if group != nil { gid = group.Gid } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 010a14273..aff1f325c 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "os" "path/filepath" @@ -303,7 +304,7 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { currFile.Close() case tar.TypeDir: logrus.Tracef("creating dir %s", path) - if err := mkdirAllWithPermissions(path, mode, uid, gid); err != nil { + if err := mkdirAllWithPermissions(path, mode, int64(uid), int64(gid)); err != nil { return err } @@ -540,7 +541,7 @@ func DownloadFileToDest(rawurl, dest string) error { // CopyDir copies the file or directory at src to dest // It returns a list of files it copied over -func CopyDir(src, dest, buildcontext string) ([]string, error) { +func CopyDir(src, dest, buildcontext string, uid, gid int64) ([]string, error) { files, err := RelativeFiles("", src) if err != nil { return nil, err @@ -562,9 +563,12 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) { logrus.Tracef("Creating directory %s", destPath) mode := fi.Mode() - uid := int(fi.Sys().(*syscall.Stat_t).Uid) - gid := int(fi.Sys().(*syscall.Stat_t).Gid) - + if uid < 
0 { + uid = int64(int(fi.Sys().(*syscall.Stat_t).Uid)) + } + if gid < 0 { + gid = int64(int(fi.Sys().(*syscall.Stat_t).Gid)) + } if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil { return nil, err } @@ -575,7 +579,7 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) { } } else { // ... Else, we want to copy over a file - if _, err := CopyFile(fullPath, destPath, buildcontext); err != nil { + if _, err := CopyFile(fullPath, destPath, buildcontext, uid, gid); err != nil { return nil, err } } @@ -606,7 +610,7 @@ func CopySymlink(src, dest, buildcontext string) (bool, error) { } // CopyFile copies the file at src to dest -func CopyFile(src, dest, buildcontext string) (bool, error) { +func CopyFile(src, dest, buildcontext string, uid, gid int64) (bool, error) { if ExcludeFile(src, buildcontext) { logrus.Debugf("%s found in .dockerignore, ignoring", src) return true, nil @@ -627,9 +631,13 @@ func CopyFile(src, dest, buildcontext string) (bool, error) { return false, err } defer srcFile.Close() - uid := fi.Sys().(*syscall.Stat_t).Uid - gid := fi.Sys().(*syscall.Stat_t).Gid - return false, CreateFile(dest, srcFile, fi.Mode(), uid, gid) + if uid < 0 { + uid = int64(fi.Sys().(*syscall.Stat_t).Uid) + } + if gid < 0 { + gid = int64(fi.Sys().(*syscall.Stat_t).Gid) + } + return false, CreateFile(dest, srcFile, fi.Mode(), uint32(uid), uint32(gid)) } // GetExcludedFiles gets a list of files to exclude from the .dockerignore @@ -699,12 +707,15 @@ func Volumes() []string { return volumes } -func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int) error { +func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int64) error { if err := os.MkdirAll(path, mode); err != nil { return err } - - if err := os.Chown(path, uid, gid); err != nil { + if uid > math.MaxUint32 || gid > math.MaxUint32 { + // due to https://github.com/golang/go/issues/8537 + return errors.New(fmt.Sprintf("Numeric User-ID or Group-ID greater than %v are not properly supported.", math.MaxUint32)) + } + if err := os.Chown(path, int(uid), int(gid)); err != nil { return err } // In some cases, MkdirAll doesn't change the permissions, so run Chmod diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index e2529ed97..109f2337a 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -963,7 +963,7 @@ func Test_CopyFile_skips_self(t *testing.T) { t.Fatal(err) } - ignored, err := CopyFile(tempFile, tempFile, "") + ignored, err := CopyFile(tempFile, tempFile, "", -1, -1) if err != nil { t.Fatal(err) } From 517cb50278b6eb6222feca9dd80bc1b8c737d4df Mon Sep 17 00:00:00 2001 From: xanonid Date: Fri, 24 Jan 2020 16:16:22 +0100 Subject: [PATCH 15/37] Add Unit test for GetUIDAndGIDFromString --- pkg/util/command_util_test.go | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go index 8e85b3e25..b5895e9a6 100644 --- a/pkg/util/command_util_test.go +++ b/pkg/util/command_util_test.go @@ -17,8 +17,11 @@ limitations under the License. 
package util import ( + "fmt" + "os/user" "reflect" "sort" + "strconv" "testing" "github.com/GoogleContainerTools/kaniko/testutil" @@ -526,3 +529,35 @@ func TestResolveEnvironmentReplacementList(t *testing.T) { }) } } + +func Test_GetUIDAndGIDFromString(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatalf("Cannot get current user: %s", err) + } + groups, err := currentUser.GroupIds() + if err != nil || len(groups) == 0 { + t.Fatalf("Cannot get groups for current user: %s", err) + } + primaryGroupObj, err := user.LookupGroupId(groups[0]) + if err != nil { + t.Fatalf("Could not lookup name of group %s: %s", groups[0], err) + } + primaryGroup := primaryGroupObj.Name + + testCases := []string{ + fmt.Sprintf("%s:%s", currentUser.Uid, currentUser.Gid), + fmt.Sprintf("%s:%s", currentUser.Username, currentUser.Gid), + fmt.Sprintf("%s:%s", currentUser.Uid, primaryGroup), + fmt.Sprintf("%s:%s", currentUser.Username, primaryGroup), + } + expectedUid, _ := strconv.ParseUint(currentUser.Uid, 10, 32) + expectedGid, _ := strconv.ParseUint(currentUser.Gid, 10, 32) + for _, tt := range testCases { + uid, gid, err := GetUIDAndGIDFromString(tt, false) + if uid != uint32(expectedUid) || gid != uint32(expectedGid) || err != nil { + t.Errorf("Could not correctly decode %s to uid/gid %d:%d. Result: %d:%d", tt, expectedUid, expectedGid, + uid, gid) + } + } +} From ac4c80d2416d7486c9a0bc9b95ef2cab31601370 Mon Sep 17 00:00:00 2001 From: xanonid Date: Fri, 24 Jan 2020 16:49:48 +0100 Subject: [PATCH 16/37] Refactor code, introduce constants for uid/gid = -1 --- cmd/executor/cmd/root.go | 2 +- pkg/commands/copy.go | 4 ++-- pkg/util/fs_util.go | 29 +++++++++++++++++------------ pkg/util/fs_util_test.go | 2 +- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 937e7f4a4..f39e011ed 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -215,7 +215,7 @@ func resolveEnvironmentBuildArgs(arguments []string, resolver func(string) strin // copy Dockerfile to /kaniko/Dockerfile so that if it's specified in the .dockerignore // it won't be copied into the image func copyDockerfile() error { - if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, "", -1, -1); err != nil { + if _, err := util.CopyFile(opts.DockerfilePath, constants.DockerfilePath, "", util.DoNotChangeUID, util.DoNotChangeGID); err != nil { return errors.Wrap(err, "copying dockerfile") } opts.DockerfilePath = constants.DockerfilePath diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index 7af0b34f1..25a78cdb9 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -45,8 +45,8 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu c.buildcontext = filepath.Join(constants.KanikoDir, c.cmd.From) } var uid, gid int64 - uid = -1 - gid = -1 + uid = util.DoNotChangeUID + gid = util.DoNotChangeGID replacementEnvs := buildArgs.ReplacementEnvs(config.Env) diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index aff1f325c..dca42b90d 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -41,6 +41,9 @@ import ( "github.com/sirupsen/logrus" ) +const DoNotChangeUID = -1 +const DoNotChangeGID = -1 + type WhitelistEntry struct { Path string PrefixMatchOnly bool @@ -539,6 +542,18 @@ func DownloadFileToDest(rawurl, dest string) error { return os.Chtimes(dest, mTime, mTime) } +// DetermineTargetFileOwnership returns the user provided uid/gid combination. 
+// If they are set to -1, the uid/gid from the original file is used. +func DetermineTargetFileOwnership(fi os.FileInfo, uid, gid int64) (int64, int64) { + if uid <= DoNotChangeUID { + uid = int64(fi.Sys().(*syscall.Stat_t).Uid) + } + if gid <= DoNotChangeGID { + gid = int64(fi.Sys().(*syscall.Stat_t).Gid) + } + return uid, gid +} + // CopyDir copies the file or directory at src to dest // It returns a list of files it copied over func CopyDir(src, dest, buildcontext string, uid, gid int64) ([]string, error) { @@ -563,12 +578,7 @@ func CopyDir(src, dest, buildcontext string, uid, gid int64) ([]string, error) { logrus.Tracef("Creating directory %s", destPath) mode := fi.Mode() - if uid < 0 { - uid = int64(int(fi.Sys().(*syscall.Stat_t).Uid)) - } - if gid < 0 { - gid = int64(int(fi.Sys().(*syscall.Stat_t).Gid)) - } + uid, gid = DetermineTargetFileOwnership(fi, uid, gid) if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil { return nil, err } @@ -631,12 +641,7 @@ func CopyFile(src, dest, buildcontext string, uid, gid int64) (bool, error) { return false, err } defer srcFile.Close() - if uid < 0 { - uid = int64(fi.Sys().(*syscall.Stat_t).Uid) - } - if gid < 0 { - gid = int64(fi.Sys().(*syscall.Stat_t).Gid) - } + uid, gid = DetermineTargetFileOwnership(fi, uid, gid) return false, CreateFile(dest, srcFile, fi.Mode(), uint32(uid), uint32(gid)) } diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 109f2337a..ad0d935a2 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -963,7 +963,7 @@ func Test_CopyFile_skips_self(t *testing.T) { t.Fatal(err) } - ignored, err := CopyFile(tempFile, tempFile, "", -1, -1) + ignored, err := CopyFile(tempFile, tempFile, "", DoNotChangeUID, DoNotChangeGID) if err != nil { t.Fatal(err) } From 56125ff464219077beb512736670474288331e35 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 31 Jan 2020 13:48:14 -0800 Subject: [PATCH 17/37] Rebase from master with some changes --- pkg/commands/run.go | 13 +------------ pkg/util/command_util.go | 22 ++++++++++++++++++---- pkg/util/fs_util_test.go | 6 ++++++ 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/pkg/commands/run.go b/pkg/commands/run.go index 73ea4ca94..409060443 100644 --- a/pkg/commands/run.go +++ b/pkg/commands/run.go @@ -71,18 +71,7 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui var userStr string // If specified, run the command as a specific user if config.User != "" { - userAndGroup := strings.Split(config.User, ":") - userStr = userAndGroup[0] - var groupStr string - if len(userAndGroup) > 1 { - groupStr = userAndGroup[1] - } - - uidStr, gidStr, err := util.GetUserFromUsername(userStr, groupStr) - if err !=nil{ - return err - } - uid, gid, err := util.GetUIDAndGIDFromString(uidStr, gidStr) + uid, gid, err := util.GetUIDAndGIDFromString(config.User, false) if err != nil { return err } diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index 2121f4055..c9ba9f4cf 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -328,9 +328,20 @@ Loop: } // Extract user and group id from a string formatted 'user:group'. -// If gidEqualToUIDIfNotGiven is set, the gid is equal to uid if the group is not specified +// If fallbackToUID is set, the gid is equal to uid if the group is not specified // otherwise gid is set to zero. 
-func GetUIDAndGIDFromString(uidStr string, gidStr string) (uint32, uint32, error) { +func GetUIDAndGIDFromString(userGroupString string, fallbackToUID bool) (uint32, uint32, error) { + userAndGroup := strings.Split(userGroupString, ":") + userStr := userAndGroup[0] + var groupStr string + if len(userAndGroup) > 1 { + groupStr = userAndGroup[1] + } + + uidStr, gidStr, err := GetUserFromUsername(userStr, groupStr, fallbackToUID) + if err != nil { + return 0, 0, err + } // uid and gid need to be fit into uint32 uid64, err := strconv.ParseUint(uidStr, 10, 32) if err != nil { @@ -343,7 +354,7 @@ func GetUIDAndGIDFromString(uidStr string, gidStr string) (uint32, uint32, error return uint32(uid64), uint32(gid64), nil } -func GetUserFromUsername(userStr string, groupStr string) (string, string, error) { +func GetUserFromUsername(userStr string, groupStr string, fallbackToUID bool) (string, string, error) { // Lookup by username userObj, err := Lookup(userStr) if err != nil { @@ -366,7 +377,10 @@ func GetUserFromUsername(userStr string, groupStr string) (string, string, error } uid := userObj.Uid - gid := userObj.Gid + gid := "" + if fallbackToUID { + gid = userObj.Gid + } if group != nil { gid = group.Gid } diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index ad0d935a2..361915350 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -1307,6 +1307,12 @@ func TestUpdateWhitelist(t *testing.T) { t.Run(tt.name, func(t *testing.T) { whitelist = initialWhitelist defer func() { whitelist = initialWhitelist }() + sort.Slice(tt.expected, func(i, j int) bool { + return tt.expected[i].Path < tt.expected[j].Path + }) + sort.Slice(whitelist, func(i, j int) bool { + return whitelist[i].Path < whitelist[j].Path + }) UpdateWhitelist(tt.whitelistVarRun) sort.Slice(tt.expected, func(i, j int) bool { return tt.expected[i].Path < tt.expected[j].Path From 31f626cb22507580b3728a031b0f0587c608a8ac Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 3 Feb 2020 15:23:16 -0800 Subject: [PATCH 18/37] initilize group to 0 --- pkg/util/command_util.go | 2 +- pkg/util/command_util_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index c9ba9f4cf..23f035ec4 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -377,7 +377,7 @@ func GetUserFromUsername(userStr string, groupStr string, fallbackToUID bool) (s } uid := userObj.Uid - gid := "" + gid := "0" if fallbackToUID { gid = userObj.Gid } diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go index b5895e9a6..472646388 100644 --- a/pkg/util/command_util_test.go +++ b/pkg/util/command_util_test.go @@ -551,12 +551,12 @@ func Test_GetUIDAndGIDFromString(t *testing.T) { fmt.Sprintf("%s:%s", currentUser.Uid, primaryGroup), fmt.Sprintf("%s:%s", currentUser.Username, primaryGroup), } - expectedUid, _ := strconv.ParseUint(currentUser.Uid, 10, 32) - expectedGid, _ := strconv.ParseUint(currentUser.Gid, 10, 32) + expectedU, _ := strconv.ParseUint(currentUser.Uid, 10, 32) + expectedG, _ := strconv.ParseUint(currentUser.Gid, 10, 32) for _, tt := range testCases { uid, gid, err := GetUIDAndGIDFromString(tt, false) - if uid != uint32(expectedUid) || gid != uint32(expectedGid) || err != nil { - t.Errorf("Could not correctly decode %s to uid/gid %d:%d. Result: %d:%d", tt, expectedUid, expectedGid, + if uid != uint32(expectedU) || gid != uint32(expectedG) || err != nil { + t.Errorf("Could not correctly decode %s to uid/gid %d:%d. 
Result: %d:%d", tt, expectedU, expectedG, uid, gid) } } From 2e95c3040c2df9731b27039712f2b3a4e4c7628c Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 4 Feb 2020 10:55:07 -0800 Subject: [PATCH 19/37] update initialWhitelist instead of whitelist --- pkg/util/fs_util.go | 3 ++- pkg/util/fs_util_test.go | 16 +++++----------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index dca42b90d..9236effc8 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -817,7 +817,8 @@ func UpdateWhitelist(whitelistVarRun bool) { if !whitelistVarRun { return } - whitelist = append(initialWhitelist, WhitelistEntry{ + logrus.Trace("Adding /var/run to initialWhitelist ") + initialWhitelist = append(initialWhitelist, WhitelistEntry{ // /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar // which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist // in the image with no way to tell if it came from the base image or not. diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 361915350..388292da4 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -1305,22 +1305,16 @@ func TestUpdateWhitelist(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - whitelist = initialWhitelist - defer func() { whitelist = initialWhitelist }() - sort.Slice(tt.expected, func(i, j int) bool { - return tt.expected[i].Path < tt.expected[j].Path - }) - sort.Slice(whitelist, func(i, j int) bool { - return whitelist[i].Path < whitelist[j].Path - }) + original := initialWhitelist + defer func() { initialWhitelist = original }() UpdateWhitelist(tt.whitelistVarRun) sort.Slice(tt.expected, func(i, j int) bool { return tt.expected[i].Path < tt.expected[j].Path }) - sort.Slice(whitelist, func(i, j int) bool { - return whitelist[i].Path < whitelist[j].Path + sort.Slice(initialWhitelist, func(i, j int) bool { + return initialWhitelist[i].Path < initialWhitelist[j].Path }) - testutil.CheckDeepEqual(t, tt.expected, whitelist) + testutil.CheckDeepEqual(t, tt.expected, initialWhitelist) }) } } From ff646ae8903f598535d066204897d59b45154365 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 4 Feb 2020 11:12:24 -0800 Subject: [PATCH 20/37] release 0.17.01 --- CHANGELOG.md | 4 ++++ Makefile | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 307d76722..8ff0b5c96 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# v0.17.1 Release - 2020-02-04 + +This is minor patch release to fix [#1002](https://github.com/GoogleContainerTools/kaniko/issues/1002) + # v0.17.0 Release - 2020-02-03 ## New Features diff --git a/Makefile b/Makefile index f5442ac56..3f67aa797 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ # Bump these on release VERSION_MAJOR ?= 0 VERSION_MINOR ?= 17 -VERSION_BUILD ?= 0 +VERSION_BUILD ?= 1 VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) VERSION_PACKAGE = $(REPOPATH/pkg/version) From 2cd68d2e2f4ac4f9504514559cd676b53c91a507 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Wed, 5 Feb 2020 14:40:52 -0800 Subject: [PATCH 21/37] fix flake in copy symlink --- .../dockerfiles/Dockerfile_test_copy_symlink | 10 +++++++++- pkg/commands/copy.go | 4 ++-- pkg/executor/build.go | 4 +++- pkg/util/fs_util.go | 16 ++++++++++------ pkg/util/fs_util_test.go | 2 +- 5 files changed, 25 insertions(+), 11 deletions(-) diff --git 
a/integration/dockerfiles/Dockerfile_test_copy_symlink b/integration/dockerfiles/Dockerfile_test_copy_symlink index e517d8ce7..82f5774c6 100644 --- a/integration/dockerfiles/Dockerfile_test_copy_symlink +++ b/integration/dockerfiles/Dockerfile_test_copy_symlink @@ -2,5 +2,13 @@ FROM busybox as t RUN echo "hello" > /tmp/target RUN ln -s /tmp/target /tmp/link +## Relative link +RUN cd tmp && ln -s target relative_link + +## Relative link with paths +RUN mkdir workdir && cd workdir && ln -s ../tmp/target relative_dir_link + FROM scratch -COPY --from=t /tmp/link /tmp \ No newline at end of file +COPY --from=t /tmp/link /tmp/ +COPY --from=t /tmp/relative_link /tmp/ +COPY --from=t /workdir/relative_dir_link /workdir/ \ No newline at end of file diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index 25a78cdb9..aed03cb3a 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -102,8 +102,8 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu } c.snapshotFiles = append(c.snapshotFiles, copiedFiles...) } else if util.IsSymlink(fi) { - // If file is a symlink, we want to create the same relative symlink - exclude, err := util.CopySymlink(fullPath, destPath, c.buildcontext) + // If file is a symlink, we want to copy the target file to destPath + exclude, err := util.CopySymlink(fullPath, destPath, c.buildcontext, uid, gid) if err != nil { return err } diff --git a/pkg/executor/build.go b/pkg/executor/build.go index b8e7f85b8..a4f85a6c7 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -575,7 +575,9 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) { } for _, p := range filesToSave { logrus.Infof("Saving file %s for later use", p) - util.CopyFileOrSymlink(p, dstDir) + if err := util.CopyFileOrSymlink(p, dstDir); err != nil { + return nil, err + } } // Delete the filesystem diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 9236effc8..afd20e65b 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -584,7 +584,7 @@ func CopyDir(src, dest, buildcontext string, uid, gid int64) ([]string, error) { } } else if IsSymlink(fi) { // If file is a symlink, we want to create the same relative symlink - if _, err := CopySymlink(fullPath, destPath, buildcontext); err != nil { + if _, err := CopySymlink(fullPath, destPath, buildcontext, uid, gid); err != nil { return nil, err } } else { @@ -599,12 +599,12 @@ func CopyDir(src, dest, buildcontext string, uid, gid int64) ([]string, error) { } // CopySymlink copies the symlink at src to dest -func CopySymlink(src, dest, buildcontext string) (bool, error) { +func CopySymlink(src, dest, buildcontext string, uid int64, gid int64) (bool, error) { if ExcludeFile(src, buildcontext) { logrus.Debugf("%s found in .dockerignore, ignoring", src) return true, nil } - link, err := os.Readlink(src) + link, err := filepath.EvalSymlinks(src) if err != nil { return false, err } @@ -616,7 +616,7 @@ func CopySymlink(src, dest, buildcontext string) (bool, error) { if err := createParentDirectory(dest); err != nil { return false, err } - return false, os.Symlink(link, dest) + return CopyFile(link, dest, buildcontext, uid, gid) } // CopyFile copies the file at src to dest @@ -789,11 +789,15 @@ func getSymlink(path string) error { func CopyFileOrSymlink(src string, destDir string) error { destFile := filepath.Join(destDir, src) if fi, _ := os.Lstat(src); IsSymlink(fi) { - link, err := os.Readlink(src) + link, err := EvalSymLink(src) if err != nil { return err } - return os.Symlink(link, destFile) + 
linkPath := filepath.Join(destDir, link) + if err := createParentDirectory(destFile); err != nil { + return err + } + return os.Symlink(linkPath, destFile) } return otiai10Cpy.Copy(src, destFile) } diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 388292da4..b9c113184 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -835,7 +835,7 @@ func TestCopySymlink(t *testing.T) { if err := os.Symlink(tc.linkTarget, link); err != nil { t.Fatal(err) } - if _, err := CopySymlink(link, dest, ""); err != nil { + if _, err := CopySymlink(link, dest, "", 0, 0); err != nil { t.Fatal(err) } got, err := os.Readlink(dest) From 9e17ffd6cbbc1d9d9b25a83184412d5049b23eeb Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Wed, 5 Feb 2020 15:59:07 -0800 Subject: [PATCH 22/37] fix unit tests --- pkg/util/fs_util_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index b9c113184..dcb17771e 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -835,16 +835,12 @@ func TestCopySymlink(t *testing.T) { if err := os.Symlink(tc.linkTarget, link); err != nil { t.Fatal(err) } - if _, err := CopySymlink(link, dest, "", 0, 0); err != nil { + if _, err := CopySymlink(link, dest, "", DoNotChangeUID, DoNotChangeGID); err != nil { t.Fatal(err) } - got, err := os.Readlink(dest) - if err != nil { + if _, err := os.Lstat(dest); err != nil { t.Fatalf("error reading link %s: %s", link, err) } - if got != tc.linkTarget { - t.Errorf("link target does not match: %s != %s", got, tc.linkTarget) - } }) } } From 3e2221cf6f7ad6486df7d9b61accd8c4cbcbaeb8 Mon Sep 17 00:00:00 2001 From: Ben Einaudi Date: Thu, 30 Jan 2020 18:33:21 +0100 Subject: [PATCH 23/37] Allow contributors to launch integration tests against local registry This change allows user to launch integration tests with a local registry Fixes #1012 --- .travis.yml | 4 +- DEVELOPMENT.md | 68 ++++++-- integration/config.go | 34 ++++ integration/images.go | 4 +- integration/integration_test.go | 109 ++++++------ integration/migrate_gcr.go | 155 ++++++++++++++++++ .../replace-gcr-with-local-registry.sh | 43 ----- 7 files changed, 308 insertions(+), 109 deletions(-) create mode 100644 integration/config.go create mode 100644 integration/migrate_gcr.go delete mode 100755 integration/replace-gcr-with-local-registry.sh diff --git a/.travis.yml b/.travis.yml index e19669cc7..11dcbe8fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,9 +13,7 @@ before_install: - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce - curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && sudo mv container-diff-linux-amd64 /usr/local/bin/container-diff - docker run -d -p 5000:5000 --restart always --name registry registry:2 - - ./integration/replace-gcr-with-local-registry.sh integration/dockerfiles - script: - make test - - ./integration-test.sh --uploadToGCS=false + - make integration-test - make images diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index b395d2d9a..69a629f54 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -67,18 +67,39 @@ _These tests will not run correctly unless you have [checked out your fork into ### Integration tests -The integration tests live in [`integration`](./integration) and can be run with: +Currently the integration tests that live in [`integration`](./integration) can be run against your own gcloud space or a local registry. 
+ +In either case, you will need the following tools: + +* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation) + +#### GCloud + +To run integration tests with your Google Cloud Storage, you will also need the following tools: + +* [`gcloud`](https://cloud.google.com/sdk/install) +* [`gsutil`](https://cloud.google.com/storage/docs/gsutil_install) +* A bucket in [GCS](https://cloud.google.com/storage/) which you have write access to via + the user currently logged into `gcloud` +* An image repo which you have write access to via the user currently logged into `gcloud` + +Once this step is done, you must override the project using environment variables: + +* `GCS_BUCKET` - The name of your GCS bucket +* `IMAGE_REPO` - The path to your docker image repo + +This can be done as follows: ```shell export GCS_BUCKET="gs://" export IMAGE_REPO="gcr.io/somerepo" -make integration-test ``` -If you want to run `make integration-test`, you must override the project using environment variables: +Then you can launch integration tests as follows: -* `GCS_BUCKET` - The name of your GCS bucket -* `IMAGE_REPO` - The path to your docker image repo +```shell +make integration-test +``` You can also run tests with `go test`, for example to run tests individually: @@ -86,16 +107,37 @@ You can also run tests with `go test`, for example to run tests individually: go test ./integration -v --bucket $GCS_BUCKET --repo $IMAGE_REPO -run TestLayers/test_layer_Dockerfile_test_copy_bucket ``` -Requirements: +These tests will be kicked off by [reviewers](#reviews) for submitted PRs via the Kokoro task. + +#### Local repository + +To run integration tests locally against a local registry, start a local Docker registry: + +```shell +docker run --rm -d -p 5000:5000 --name registry registry:2 +``` + +Then export the `IMAGE_REPO` variable with the `localhost:5000` value: + +```shell +export IMAGE_REPO=localhost:5000 +``` + +And run the integration tests: + +```shell +make integration-test +``` + +You can also run tests with `go test`, for example to run tests individually: + +```shell +go test ./integration -v --repo localhost:5000 -run TestLayers/test_layer_Dockerfile_test_copy_bucket +``` + +These tests will be kicked off by [reviewers](#reviews) for submitted PRs via the Travis task. -* [`gcloud`](https://cloud.google.com/sdk/install) -* [`gsutil`](https://cloud.google.com/storage/docs/gsutil_install) -* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation) -* A bucket in [GCS](https://cloud.google.com/storage/) which you have write access to via - the user currently logged into `gcloud` -* An image repo which you have write access to via the user currently logged into `gcloud` -These tests will be kicked off by [reviewers](#reviews) for submitted PRs. ### Benchmarking diff --git a/integration/config.go b/integration/config.go new file mode 100644 index 000000000..2a6df72a2 --- /dev/null +++ b/integration/config.go @@ -0,0 +1,34 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import "strings" + +type integrationTestConfig struct { + gcsBucket string + imageRepo string + onbuildBaseImage string + hardlinkBaseImage string + serviceAccount string + dockerMajorVersion int +} + +const gcrRepoPrefix string = "gcr.io/" + +func (config *integrationTestConfig) isGcrRepository() bool { + return strings.HasPrefix(config.imageRepo, gcrRepoPrefix) +} diff --git a/integration/images.go b/integration/images.go index 13bf6d7ec..78b0c98bc 100644 --- a/integration/images.go +++ b/integration/images.go @@ -191,7 +191,7 @@ func addServiceAccountFlags(flags []string, serviceAccount string) []string { // BuildImage will build dockerfile (located at dockerfilesPath) using both kaniko and docker. // The resulting image will be tagged with imageRepo. If the dockerfile will be built with // context (i.e. it is in `buildContextTests`) the context will be pulled from gcsBucket. -func (d *DockerFileBuilder) BuildImage(config *gcpConfig, dockerfilesPath, dockerfile string) error { +func (d *DockerFileBuilder) BuildImage(config *integrationTestConfig, dockerfilesPath, dockerfile string) error { gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo _, ex, _, _ := runtime.Caller(0) cwd := filepath.Dir(ex) @@ -317,7 +317,7 @@ func populateVolumeCache() error { } // buildCachedImages builds the images for testing caching via kaniko where version is the nth time this image has been built -func (d *DockerFileBuilder) buildCachedImages(config *gcpConfig, cacheRepo, dockerfilesPath string, version int) error { +func (d *DockerFileBuilder) buildCachedImages(config *integrationTestConfig, cacheRepo, dockerfilesPath string, version int) error { imageRepo, serviceAccount := config.imageRepo, config.serviceAccount _, ex, _, _ := runtime.Caller(0) cwd := filepath.Dir(ex) diff --git a/integration/integration_test.go b/integration/integration_test.go index c5c910d88..bfc27fef0 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -33,12 +33,14 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/daemon" + "github.com/pkg/errors" + "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/GoogleContainerTools/kaniko/testutil" ) -var config *gcpConfig +var config *integrationTestConfig var imageBuilder *DockerFileBuilder const ( @@ -80,37 +82,65 @@ func getDockerMajorVersion() int { } return ver } +func launchTests(m *testing.M, dockerfiles []string) (int, error) { + + if config.isGcrRepository() { + contextFile, err := CreateIntegrationTarball() + if err != nil { + return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context") + } + + fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile) + if err != nil { + return 1, errors.Wrap(err, "Failed to upload build context") + } + + if err = os.Remove(contextFile); err != nil { + return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile)) + } + + RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) }) + defer DeleteFromBucket(fileInBucket) + } else { + var err error + var migratedFiles []string + if migratedFiles, err = MigrateGCRRegistry(dockerfilesPath, dockerfiles, config.imageRepo); err != nil { + RollbackMigratedFiles(dockerfilesPath, migratedFiles) + return 
1, errors.Wrap(err, "Fail to migrate dockerfiles from gcs") + } + RunOnInterrupt(func() { RollbackMigratedFiles(dockerfilesPath, migratedFiles) }) + defer RollbackMigratedFiles(dockerfilesPath, migratedFiles) + } + if err := buildRequiredImages(); err != nil { + return 1, errors.Wrap(err, "Error while building images") + } + + imageBuilder = NewDockerFileBuilder(dockerfiles) + + return m.Run(), nil +} func TestMain(m *testing.M) { if !meetsRequirements() { fmt.Println("Missing required tools") os.Exit(1) } - config = initGCPConfig() - if config.uploadToGCS { - contextFile, err := CreateIntegrationTarball() + if dockerfiles, err := FindDockerFiles(dockerfilesPath); err != nil { + fmt.Println("Coudn't create map of dockerfiles", err) + os.Exit(1) + } else { + config = initIntegrationTestConfig() + exitCode, err := launchTests(m, dockerfiles) if err != nil { - fmt.Println("Failed to create tarball of integration files for build context", err) - os.Exit(1) + fmt.Println(err) } - - fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile) - if err != nil { - fmt.Println("Failed to upload build context", err) - os.Exit(1) - } - - err = os.Remove(contextFile) - if err != nil { - fmt.Printf("Failed to remove tarball at %s: %s\n", contextFile, err) - os.Exit(1) - } - - RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) }) - defer DeleteFromBucket(fileInBucket) + os.Exit(exitCode) } +} + +func buildRequiredImages() error { setupCommands := []struct { name string command []string @@ -145,20 +175,10 @@ func TestMain(m *testing.M) { fmt.Println(setupCmd.name) cmd := exec.Command(setupCmd.command[0], setupCmd.command[1:]...) if out, err := RunCommandWithoutTest(cmd); err != nil { - fmt.Printf("%s failed: %s", setupCmd.name, err) - fmt.Println(string(out)) - os.Exit(1) + return errors.Wrap(err, fmt.Sprintf("%s failed: %s", setupCmd.name, string(out))) } } - - dockerfiles, err := FindDockerFiles(dockerfilesPath) - if err != nil { - fmt.Printf("Coudn't create map of dockerfiles: %s", err) - os.Exit(1) - } - imageBuilder = NewDockerFileBuilder(dockerfiles) - - os.Exit(m.Run()) + return nil } func TestRun(t *testing.T) { @@ -535,16 +555,6 @@ func logBenchmarks(benchmark string) error { return nil } -type gcpConfig struct { - gcsBucket string - imageRepo string - onbuildBaseImage string - hardlinkBaseImage string - serviceAccount string - dockerMajorVersion int - uploadToGCS bool -} - type imageDetails struct { name string numLayers int @@ -555,12 +565,11 @@ func (i imageDetails) String() string { return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers) } -func initGCPConfig() *gcpConfig { - var c gcpConfig +func initIntegrationTestConfig() *integrationTestConfig { + var c integrationTestConfig flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.") flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.") flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.") - flag.BoolVar(&c.uploadToGCS, "uploadToGCS", true, "Upload the tar-ed contents of `integration` dir to GCS bucket. Default is true. 
Set this to false to prevent uploading.") flag.Parse() if len(c.serviceAccount) > 0 { @@ -575,8 +584,12 @@ func initGCPConfig() *gcpConfig { os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", absPath) } - if c.gcsBucket == "" || c.imageRepo == "" { - log.Fatalf("You must provide a gcs bucket (\"%s\" was provided) and a docker repo (\"%s\" was provided)", c.gcsBucket, c.imageRepo) + if c.imageRepo == "" { + log.Fatal("You must provide a image repository") + } + + if c.isGcrRepository() && c.gcsBucket == "" { + log.Fatalf("You must provide a gcs bucket when using a Google Container Registry (\"%s\" was provided)", c.imageRepo) } if !strings.HasSuffix(c.imageRepo, "/") { c.imageRepo = c.imageRepo + "/" diff --git a/integration/migrate_gcr.go b/integration/migrate_gcr.go new file mode 100644 index 000000000..c056f5f6e --- /dev/null +++ b/integration/migrate_gcr.go @@ -0,0 +1,155 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "bufio" + "fmt" + "os" + "os/exec" + "path" + "regexp" + "strings" +) + +// This function crawl through dockerfiles and replace all reference to "gcr.io/" in FROM instruction and replace it targetRepo +// The result is the array containing all modified file +// Each of this file is saved +func MigrateGCRRegistry(dockerfilesPath string, dockerfiles []string, targetRepo string) ([]string, error) { + var savedFiles []string + importedImages := map[string]interface{}{} + + for _, dockerfile := range dockerfiles { + if referencedImages, savedFile, err := migrateFile(dockerfilesPath, dockerfile, targetRepo); err != nil { + if savedFile { + savedFiles = append(savedFiles, dockerfile) + } + return savedFiles, err + } else if savedFile { + savedFiles = append(savedFiles, dockerfile) + for _, referencedImage := range referencedImages { + importedImages[referencedImage] = nil + } + } + } + for image := range importedImages { + if err := importImage(image, targetRepo); err != nil { + return savedFiles, err + } + } + return savedFiles, nil +} + +// This function rollback all previously modified files +func RollbackMigratedFiles(dockerfilesPath string, dockerfiles []string) []error { + var result []error + for _, dockerfile := range dockerfiles { + fmt.Printf("Rolling back %s\n", dockerfile) + if err := recoverDockerfile(dockerfilesPath, dockerfile); err != nil { + result = append(result, err) + } + } + return result +} + +// Import the gcr.io image such as gcr.io/my-image to targetRepo +func importImage(image string, targetRepo string) error { + fmt.Printf("Importing %s to %s\n", image, targetRepo) + targetImage := strings.ReplaceAll(image, "gcr.io/", targetRepo) + pullCmd := exec.Command("docker", "pull", image) + if out, err := RunCommandWithoutTest(pullCmd); err != nil { + return fmt.Errorf("Failed to pull image %s with docker command \"%s\": %s %s", image, pullCmd.Args, err, string(out)) + } + + tagCmd := exec.Command("docker", "tag", image, targetImage) + if out, err := RunCommandWithoutTest(tagCmd); err != nil { + return 
fmt.Errorf("Failed to tag image %s to %s with docker command \"%s\": %s %s", image, targetImage, tagCmd.Args, err, string(out)) + } + + pushCmd := exec.Command("docker", "push", targetImage) + if out, err := RunCommandWithoutTest(pushCmd); err != nil { + return fmt.Errorf("Failed to push image %s with docker command \"%s\": %s %s", targetImage, pushCmd.Args, err, string(out)) + } + return nil +} + +// takes a dockerfile and replace each gcr.io/ occurrence in FROM instruction and replace it with imageRepo +// return true if the file was saved +// if so, the array is non nil and contains each gcr image name +func migrateFile(dockerfilesPath string, dockerfile string, imageRepo string) ([]string, bool, error) { + var input *os.File + var output *os.File + var err error + var referencedImages []string + + if input, err = os.Open(path.Join(dockerfilesPath, dockerfile)); err != nil { + return nil, false, err + } + defer input.Close() + + var lines []string + scanner := bufio.NewScanner(input) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + line := scanner.Text() + if isFromGcrBaseImageInstruction(line) { + referencedImages = append(referencedImages, strings.Trim(strings.Split(line, " ")[1], " ")) + lines = append(lines, strings.ReplaceAll(line, gcrRepoPrefix, imageRepo)) + } else { + lines = append(lines, line) + } + } + rawOutput := []byte(strings.Join(append(lines, ""), "\n")) + + if len(referencedImages) == 0 { + return nil, false, nil + } + + if err = saveDockerfile(dockerfilesPath, dockerfile); err != nil { + return nil, false, err + } + + if output, err = os.Create(path.Join(dockerfilesPath, dockerfile)); err != nil { + return nil, true, err + } + defer output.Close() + + if written, err := output.Write(rawOutput); err != nil { + return nil, true, err + } else if written != len(rawOutput) { + return nil, true, fmt.Errorf("invalid number of byte written. Got %d, expected %d", written, len(rawOutput)) + } + return referencedImages, true, nil + +} + +func isFromGcrBaseImageInstruction(line string) bool { + result, _ := regexp.MatchString(fmt.Sprintf("FROM +%s", gcrRepoPrefix), line) + return result +} + +func saveDockerfile(dockerfilesPath string, dockerfile string) error { + return os.Rename(path.Join(dockerfilesPath, dockerfile), path.Join(dockerfilesPath, saveName(dockerfile))) +} + +func recoverDockerfile(dockerfilesPath string, dockerfile string) error { + return os.Rename(path.Join(dockerfilesPath, saveName(dockerfile)), path.Join(dockerfilesPath, dockerfile)) +} + +func saveName(dockerfile string) string { + return fmt.Sprintf("%s_save_%d", dockerfile, os.Getpid()) +} diff --git a/integration/replace-gcr-with-local-registry.sh b/integration/replace-gcr-with-local-registry.sh deleted file mode 100755 index e40337fde..000000000 --- a/integration/replace-gcr-with-local-registry.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -# This script is needed due to the following bug: -# https://github.com/GoogleContainerTools/kaniko/issues/966 - - -if [ "$#" -ne 1 ]; then - echo "Please specify path to dockerfiles as first argument." - echo "Usage: `basename $0` integration/dockerfiles" - exit 2 -fi - -dir_with_docker_files=$1 - -for dockerfile in $dir_with_docker_files/*; do - cat $dockerfile | grep '^FROM' | grep "gcr" | while read -r line; do - gcr_repo=$(echo "$line" | awk '{ print $2 }') - local_repo=$(echo "$gcr_repo" | sed -e "s/^.*gcr.io\(\/.*\)$/localhost:5000\1/") - remove_digest=$(echo "$local_repo" | cut -f1 -d"@") - echo "Running docker pull $gcr_repo" - docker pull "$gcr_repo" - echo "Running docker tag $gcr_repo $remove_digest" - docker tag "$gcr_repo" "$remove_digest" - echo "Running docker push $remove_digest" - docker push "$remove_digest" - echo "Updating dockerfile $dockerfile to use local repo $local_repo" - sed -i -e "s/^\(FROM \).*gcr.io\(.*\)$/\1localhost:5000\2/" $dockerfile - done -done From cc2c9a0663a64e3a2c2e22eac9d987b998daa48a Mon Sep 17 00:00:00 2001 From: tinkerborg <15373049+tinkerborg@users.noreply.github.com> Date: Tue, 14 Jan 2020 14:55:06 -0500 Subject: [PATCH 24/37] sort filesToAdd in TakeSnapshot filesToAdd is sorted in TakeSnapshotFS, but not here. This makes ordering unpredictable within the layer's tarball, causing the SHA to differ even if layer contents haven't changed --- pkg/snapshot/snapshot.go | 2 + pkg/snapshot/snapshot_test.go | 69 +++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 80b5c5ce0..59be2f0e0 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -79,6 +79,8 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { // Also add parent directories to keep the permission of them correctly. 
filesToAdd := filesWithParentDirs(files) + sort.Strings(filesToAdd) + // Add files to the layered map for _, file := range filesToAdd { if err := s.l.Add(file); err != nil { diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 8e3f16598..3a1ec7353 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -302,6 +302,75 @@ func TestFileWithLinks(t *testing.T) { } } +func TestSnasphotPreservesFileOrder(t *testing.T) { + newFiles := map[string]string{ + "foo": "newbaz1", + "bar/bat": "baz", + "bar/qux": "quuz", + "qux": "quuz", + "corge": "grault", + "garply": "waldo", + "fred": "plugh", + "xyzzy": "thud", + } + + newFileNames := []string{} + + for fileName := range newFiles { + newFileNames = append(newFileNames, fileName) + } + + filesInTars := [][]string{} + + for i := 0; i<= 2; i++ { + testDir, snapshotter, cleanup, err := setUpTestDir() + testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") + defer cleanup() + + if err != nil { + t.Fatal(err) + } + // Make some changes to the filesystem + if err := testutil.SetupFiles(testDir, newFiles); err != nil { + t.Fatalf("Error setting up fs: %s", err) + } + + filesToSnapshot := []string{} + for _, file := range newFileNames { + filesToSnapshot = append(filesToSnapshot, filepath.Join(testDir, file)) + } + + // Take a snapshot + tarPath, err := snapshotter.TakeSnapshot(filesToSnapshot) + + if err != nil { + t.Fatalf("Error taking snapshot of fs: %s", err) + } + + f, err := os.Open(tarPath) + if err != nil { + t.Fatal(err) + } + tr := tar.NewReader(f) + filesInTars = append(filesInTars, []string{}) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + filesInTars[i] = append(filesInTars[i], strings.TrimPrefix(hdr.Name, testDirWithoutLeadingSlash)) + } + } + + // Check contents of all snapshots, make sure files appear in consistent order + for i := 1; i Date: Thu, 6 Feb 2020 12:40:19 -0500 Subject: [PATCH 25/37] omit uname/gname in tar headers When using cache, the rootfs may not have been extracted. This prevents uname/gname from resolving as there is no /etc/password or /etc/group. This makes this layer unnecessarily differ from a cached layer which does contain this information. Omitting these should be consistent with Docker's behavior. 
--- pkg/snapshot/snapshot_test.go | 30 ++++++++++++++++++++++++++++++ pkg/util/tar_util.go | 5 +++++ 2 files changed, 35 insertions(+) diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 3a1ec7353..2fa6bb104 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -371,6 +371,36 @@ func TestSnasphotPreservesFileOrder(t *testing.T) { } } +func TestSnapshotOmitsUnameGname(t *testing.T) { + testDir, snapshotter, cleanup, err := setUpTestDir() + testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") + defer cleanup() + if err != nil { + t.Fatal(err) + } + + tarPath, err := snapshotter.TakeSnapshotFS() + + f, err := os.Open(tarPath) + if err != nil { + t.Fatal(err) + } + tr := tar.NewReader(f) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + if hdr.Uname != "" || hdr.Gname != "" { + t.Fatalf("Expected Uname/Gname for %s to be empty: Uname = '%s', Gname = '%s'", hdr.Name, hdr.Uname, hdr.Gname) + } + } + +} + func setupSymlink(dir string, link string, target string) error { return os.Symlink(target, filepath.Join(dir, link)) } diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go index 213ef68e3..15c4e933d 100644 --- a/pkg/util/tar_util.go +++ b/pkg/util/tar_util.go @@ -84,6 +84,11 @@ func (t *Tar) AddFileToTar(p string) error { hdr.Name = p } + // rootfs may not have been extracted when using cache, preventing uname/gname from resolving + // this makes this layer unnecessarily differ from a cached layer which does contain this information + hdr.Uname = "" + hdr.Gname = "" + hardlink, linkDst := t.checkHardlink(p, i) if hardlink { hdr.Linkname = linkDst From 9dba097a7e168c5431c8629a0fc0c60c035694f9 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 4 Feb 2020 16:34:52 -0800 Subject: [PATCH 26/37] refactor and add more tests --- pkg/commands/copy.go | 37 +++++++++++++++++----------- pkg/commands/copy_test.go | 52 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 14 deletions(-) diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index aed03cb3a..441fdbbd0 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -32,6 +32,11 @@ import ( v1 "github.com/google/go-containerregistry/pkg/v1" ) +// for testing +var ( + getUIDAndGID = util.GetUIDAndGIDFromString +) + type CopyCommand struct { BaseCommand cmd *instructions.CopyCommand @@ -44,23 +49,12 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu if c.cmd.From != "" { c.buildcontext = filepath.Join(constants.KanikoDir, c.cmd.From) } - var uid, gid int64 - uid = util.DoNotChangeUID - gid = util.DoNotChangeGID replacementEnvs := buildArgs.ReplacementEnvs(config.Env) - if c.cmd.Chown != "" { - chown, err := util.ResolveEnvironmentReplacement(c.cmd.Chown, replacementEnvs, false) - if err != nil { - return err - } - uid32, gid32, err := util.GetUIDAndGIDFromString(chown, true) - uid = int64(uid32) - gid = int64(gid32) - if err != nil { - return err - } + uid, gid, err := getUserGroup(c.cmd.Chown, replacementEnvs) + if err != nil { + return err } srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs) @@ -126,6 +120,21 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu return nil } +func getUserGroup(chownStr string, env []string) (int64, int64, error) { + if chownStr == "" { + return util.DoNotChangeUID, util.DoNotChangeGID, nil + } + chown, err := 
util.ResolveEnvironmentReplacement(chownStr, env, false) + if err != nil { + return -1, -1, err + } + uid32, gid32, err := getUIDAndGID(chown, true) + if err != nil { + return -1, -1, err + } + return int64(uid32), int64(gid32), nil +} + // FilesToSnapshot should return an empty array if still nil; no files were changed func (c *CopyCommand) FilesToSnapshot() []string { return c.snapshotFiles diff --git a/pkg/commands/copy_test.go b/pkg/commands/copy_test.go index 4e0f3533f..9331b4e69 100755 --- a/pkg/commands/copy_test.go +++ b/pkg/commands/copy_test.go @@ -378,3 +378,55 @@ func Test_CachingCopyCommand_ExecuteCommand(t *testing.T) { }) } } + +func TestGetUserGroup(t *testing.T) { + tests := []struct { + description string + chown string + env []string + mock func(string, bool) (uint32, uint32, error) + expectedU int64 + expectedG int64 + shdErr bool + }{ + { + description: "non empty chown", + chown: "some:some", + env: []string{}, + mock: func(string, bool) (uint32, uint32, error) { return 100, 1000, nil }, + expectedU: 100, + expectedG: 1000, + }, + { + description: "non empty chown with env replacement", + chown: "some:$foo", + env: []string{"foo=key"}, + mock: func(c string, t bool) (uint32, uint32, error) { + if c == "some:key" { + return 10, 100, nil + } + return 0, 0, fmt.Errorf("did not resolve environment variable") + }, + expectedU: 10, + expectedG: 100, + }, + { + description: "empty chown string", + mock: func(c string, t bool) (uint32, uint32, error) { + return 0, 0, fmt.Errorf("should not be called.") + }, + expectedU: -1, + expectedG: -1, + }, + } + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + original := getUIDAndGID + defer func() { getUIDAndGID = original }() + getUIDAndGID = tc.mock + uid, gid, err := getUserGroup(tc.chown, tc.env) + testutil.CheckErrorAndDeepEqual(t, tc.shdErr, err, uid, tc.expectedU) + testutil.CheckErrorAndDeepEqual(t, tc.shdErr, err, gid, tc.expectedG) + }) + } +} From d94a1ed53bd81896cfc40ad40bf6b7ea28a8de70 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 4 Feb 2020 18:11:18 -0800 Subject: [PATCH 27/37] fix linter --- pkg/commands/copy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/commands/copy_test.go b/pkg/commands/copy_test.go index 9331b4e69..702591a8f 100755 --- a/pkg/commands/copy_test.go +++ b/pkg/commands/copy_test.go @@ -413,7 +413,7 @@ func TestGetUserGroup(t *testing.T) { { description: "empty chown string", mock: func(c string, t bool) (uint32, uint32, error) { - return 0, 0, fmt.Errorf("should not be called.") + return 0, 0, fmt.Errorf("should not be called") }, expectedU: -1, expectedG: -1, From 9dd050b8929fbca26e5e2ace065e5b2ba154f9d0 Mon Sep 17 00:00:00 2001 From: Or Sela Date: Thu, 6 Feb 2020 13:15:16 +0200 Subject: [PATCH 28/37] set log format using a flag --- cmd/executor/cmd/root.go | 10 ++--- cmd/warmer/cmd/root.go | 10 ++--- pkg/constants/constants.go | 3 -- pkg/logging/logging.go | 76 ++++++++++++++++++++++++++++++++++++++ pkg/util/util.go | 15 -------- 5 files changed, 85 insertions(+), 29 deletions(-) create mode 100644 pkg/logging/logging.go diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index f39e011ed..c5cbd57fe 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -28,6 +28,7 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/executor" + "github.com/GoogleContainerTools/kaniko/pkg/logging" 
"github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/genuinetools/amicontained/container" @@ -38,13 +39,12 @@ import ( ) var ( - opts = &config.KanikoOptions{} - logLevel string - force bool + opts = &config.KanikoOptions{} + force bool ) func init() { - RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic") + logging.AddFlags(RootCmd.PersistentFlags()) RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container") addKanikoOptionsFlags() addHiddenFlags(RootCmd) @@ -56,7 +56,7 @@ var RootCmd = &cobra.Command{ PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if cmd.Use == "executor" { resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv) - if err := util.ConfigureLogging(logLevel); err != nil { + if err := logging.Configure(); err != nil { return err } if !opts.NoPush && len(opts.Destinations) == 0 { diff --git a/cmd/warmer/cmd/root.go b/cmd/warmer/cmd/root.go index b0e3c9076..8ced40ac0 100644 --- a/cmd/warmer/cmd/root.go +++ b/cmd/warmer/cmd/root.go @@ -23,19 +23,17 @@ import ( "github.com/GoogleContainerTools/kaniko/pkg/cache" "github.com/GoogleContainerTools/kaniko/pkg/config" - "github.com/GoogleContainerTools/kaniko/pkg/constants" - "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/logging" "github.com/pkg/errors" "github.com/spf13/cobra" ) var ( - opts = &config.WarmerOptions{} - logLevel string + opts = &config.WarmerOptions{} ) func init() { - RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic") + logging.AddFlags(RootCmd.PersistentFlags()) addKanikoOptionsFlags() addHiddenFlags() } @@ -43,7 +41,7 @@ func init() { var RootCmd = &cobra.Command{ Use: "cache warmer", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := util.ConfigureLogging(logLevel); err != nil { + if err := logging.Configure(); err != nil { return err } if len(opts.Images) == 0 { diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 9de493eb2..8dd5893ea 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -17,9 +17,6 @@ limitations under the License. package constants const ( - // DefaultLogLevel is the default log level - DefaultLogLevel = "info" - // RootDir is the path to the root directory RootDir = "/" diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go new file mode 100644 index 000000000..5ed79df98 --- /dev/null +++ b/pkg/logging/logging.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logging + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" +) + +const ( + // Default log level + DefaultLevel = "info" + + // Text format + FormatText = "text" + // Colored text format + FormatColor = "color" + // JSON format + FormatJSON = "json" +) + +var ( + logLevel string + logFormat string +) + +// AddFlags injects logging-related flags into the given FlagSet +func AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&logLevel, "verbosity", "v", DefaultLevel, "Log level (debug, info, warn, error, fatal, panic") + fs.StringVar(&logFormat, "log-format", FormatColor, "Log format (text, color, json)") +} + +// Configure sets the logrus logging level and formatter +func Configure() error { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + return errors.Wrap(err, "parsing log level") + } + logrus.SetLevel(lvl) + + var formatter logrus.Formatter + switch logFormat { + case FormatText: + formatter = &logrus.TextFormatter{ + DisableColors: true, + } + case FormatColor: + formatter = &logrus.TextFormatter{ + ForceColors: true, + } + case FormatJSON: + formatter = &logrus.JSONFormatter{} + default: + return fmt.Errorf("not a valid log format: %q. Please specify one of (text, color, json)", logFormat) + } + logrus.SetFormatter(formatter) + + return nil +} diff --git a/pkg/util/util.go b/pkg/util/util.go index 3d3755ae8..83f709801 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -28,25 +28,10 @@ import ( "syscall" "github.com/minio/highwayhash" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" v1 "github.com/google/go-containerregistry/pkg/v1" ) -// ConfigureLogging sets the logrus logging level and forces logs to be colorful (!) -func ConfigureLogging(logLevel string) error { - lvl, err := logrus.ParseLevel(logLevel) - if err != nil { - return errors.Wrap(err, "parsing log level") - } - logrus.SetLevel(lvl) - logrus.SetFormatter(&logrus.TextFormatter{ - ForceColors: true, - }) - return nil -} - // Hasher returns a hash function, used in snapshotting to determine if a file has changed func Hasher() func(string) (string, error) { pool := sync.Pool{ From 49fc5c032ded929c9cb559febd0e93838aa1cf3b Mon Sep 17 00:00:00 2001 From: cvgw Date: Thu, 6 Feb 2020 14:48:33 -0800 Subject: [PATCH 29/37] update instructions for running integration tests --- DEVELOPMENT.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 69a629f54..18b69b3e3 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -95,6 +95,12 @@ export GCS_BUCKET="gs://" export IMAGE_REPO="gcr.io/somerepo" ``` +Login for both user and application credentials +```shell +gcloud auth login +gcloud auth application-default login +``` + Then you can launch integration tests as follows: ```shell From 674f5bda5dd4b1f61b1a5ee5e9fa25b2a19b10f5 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 6 Feb 2020 15:17:09 -0800 Subject: [PATCH 30/37] fix tests --- pkg/snapshot/snapshot_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 2fa6bb104..d99e1e19a 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -323,7 +323,7 @@ func TestSnasphotPreservesFileOrder(t *testing.T) { filesInTars := [][]string{} for i := 0; i<= 2; i++ { - testDir, snapshotter, cleanup, err := setUpTestDir() + testDir, snapshotter, cleanup, err := setUpTest() testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") defer cleanup() @@ 
-372,8 +372,8 @@ func TestSnasphotPreservesFileOrder(t *testing.T) { } func TestSnapshotOmitsUnameGname(t *testing.T) { - testDir, snapshotter, cleanup, err := setUpTestDir() - testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") + _, snapshotter, cleanup, err := setUpTest() + defer cleanup() if err != nil { t.Fatal(err) From 8b69a13641e2ed1ba7cdf9f71688d51c88ff4e5f Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 6 Feb 2020 15:37:47 -0800 Subject: [PATCH 31/37] fix commut --- pkg/snapshot/snapshot_test.go | 4 ++-- pkg/util/tar_util.go | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index d99e1e19a..4cf698c52 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -322,7 +322,7 @@ func TestSnasphotPreservesFileOrder(t *testing.T) { filesInTars := [][]string{} - for i := 0; i<= 2; i++ { + for i := 0; i <= 2; i++ { testDir, snapshotter, cleanup, err := setUpTest() testDirWithoutLeadingSlash := strings.TrimLeft(testDir, "/") defer cleanup() @@ -366,7 +366,7 @@ func TestSnasphotPreservesFileOrder(t *testing.T) { } // Check contents of all snapshots, make sure files appear in consistent order - for i := 1; i Date: Fri, 7 Feb 2020 13:47:17 -0800 Subject: [PATCH 32/37] fix linter --- pkg/snapshot/snapshot_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 4cf698c52..67b70b9c5 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -380,6 +380,9 @@ func TestSnapshotOmitsUnameGname(t *testing.T) { } tarPath, err := snapshotter.TakeSnapshotFS() + if err != nil { + t.Fatal(err) + } f, err := os.Open(tarPath) if err != nil { From 7e2009b5e2338c6021d8ec16f0b6473257c4bdb4 Mon Sep 17 00:00:00 2001 From: cvgw Date: Fri, 7 Feb 2020 15:11:47 -0800 Subject: [PATCH 33/37] More idiomatic logging config Update logging configuration to be more idiomatic for a Cobra application --- cmd/executor/cmd/root.go | 15 +++++++++++---- cmd/warmer/cmd/root.go | 11 ++++++++--- pkg/logging/logging.go | 20 ++++---------------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index c5cbd57fe..d145c9256 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -39,13 +39,18 @@ import ( ) var ( - opts = &config.KanikoOptions{} - force bool + opts = &config.KanikoOptions{} + force bool + logLevel string + logFormat string ) func init() { - logging.AddFlags(RootCmd.PersistentFlags()) + RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (debug, info, warn, error, fatal, panic") + RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)") + RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container") + addKanikoOptionsFlags() addHiddenFlags(RootCmd) } @@ -56,9 +61,11 @@ var RootCmd = &cobra.Command{ PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if cmd.Use == "executor" { resolveEnvironmentBuildArgs(opts.BuildArgs, os.Getenv) - if err := logging.Configure(); err != nil { + + if err := logging.Configure(logLevel, logFormat); err != nil { return err } + if !opts.NoPush && len(opts.Destinations) == 0 { return errors.New("You must provide --destination, or use --no-push") } diff --git a/cmd/warmer/cmd/root.go b/cmd/warmer/cmd/root.go index 8ced40ac0..9bbd08e21 
100644 --- a/cmd/warmer/cmd/root.go +++ b/cmd/warmer/cmd/root.go @@ -29,11 +29,15 @@ import ( ) var ( - opts = &config.WarmerOptions{} + opts = &config.WarmerOptions{} + logLevel string + logFormat string ) func init() { - logging.AddFlags(RootCmd.PersistentFlags()) + RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", logging.DefaultLevel, "Log level (debug, info, warn, error, fatal, panic") + RootCmd.PersistentFlags().StringVar(&logFormat, "log-format", logging.FormatColor, "Log format (text, color, json)") + addKanikoOptionsFlags() addHiddenFlags() } @@ -41,9 +45,10 @@ func init() { var RootCmd = &cobra.Command{ Use: "cache warmer", PersistentPreRunE: func(cmd *cobra.Command, args []string) error { - if err := logging.Configure(); err != nil { + if err := logging.Configure(logLevel, logFormat); err != nil { return err } + if len(opts.Images) == 0 { return errors.New("You must select at least one image to cache") } diff --git a/pkg/logging/logging.go b/pkg/logging/logging.go index 5ed79df98..9a475cb29 100644 --- a/pkg/logging/logging.go +++ b/pkg/logging/logging.go @@ -21,7 +21,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/spf13/pflag" ) const ( @@ -36,27 +35,16 @@ const ( FormatJSON = "json" ) -var ( - logLevel string - logFormat string -) - -// AddFlags injects logging-related flags into the given FlagSet -func AddFlags(fs *pflag.FlagSet) { - fs.StringVarP(&logLevel, "verbosity", "v", DefaultLevel, "Log level (debug, info, warn, error, fatal, panic") - fs.StringVar(&logFormat, "log-format", FormatColor, "Log format (text, color, json)") -} - // Configure sets the logrus logging level and formatter -func Configure() error { - lvl, err := logrus.ParseLevel(logLevel) +func Configure(level, format string) error { + lvl, err := logrus.ParseLevel(level) if err != nil { return errors.Wrap(err, "parsing log level") } logrus.SetLevel(lvl) var formatter logrus.Formatter - switch logFormat { + switch format { case FormatText: formatter = &logrus.TextFormatter{ DisableColors: true, @@ -68,7 +56,7 @@ func Configure() error { case FormatJSON: formatter = &logrus.JSONFormatter{} default: - return fmt.Errorf("not a valid log format: %q. Please specify one of (text, color, json)", logFormat) + return fmt.Errorf("not a valid log format: %q. Please specify one of (text, color, json)", format) } logrus.SetFormatter(formatter) From 8e2b9ba1054a1666da8ff8db186b4e4f69860c59 Mon Sep 17 00:00:00 2001 From: James Ravn Date: Mon, 10 Feb 2020 11:19:04 +0000 Subject: [PATCH 34/37] Fix example pod.yml to not mount to root As mounting to root makes `/root` a read only filesystem, which breaks a lot of builds that try to add files to `/root` (like `/root/.npm`). 
--- examples/pod.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/pod.yaml b/examples/pod.yaml index 6b6692721..27f40a4bd 100644 --- a/examples/pod.yaml +++ b/examples/pod.yaml @@ -11,7 +11,7 @@ spec: "--destination=/"] # replace with your dockerhub account volumeMounts: - name: kaniko-secret - mountPath: /root + mountPath: /kaniko/.docker - name: dockerfile-storage mountPath: /workspace restartPolicy: Never @@ -21,7 +21,7 @@ spec: secretName: regcred items: - key: .dockerconfigjson - path: .docker/config.json + path: config.json - name: dockerfile-storage persistentVolumeClaim: claimName: dockerfile-claim From dfd3030c6939631c95f3694e60b4fbdfda8e5b2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Str=C3=B6mberg?= Date: Mon, 10 Feb 2020 16:50:34 -0800 Subject: [PATCH 35/37] Document that Kaniko is not officially supported This is in-line with what I will be doing with our other GoogleContainerTools projects. Thoughts? --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6474abdec..469967e44 100644 --- a/README.md +++ b/README.md @@ -4,19 +4,18 @@ ![kaniko logo](logo/Kaniko-Logo.png) -kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster. +kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster. kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace. This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster. -kaniko is meant to be run as an image, `gcr.io/kaniko-project/executor`. -We do **not** recommend running the kaniko executor binary in another image, as it might not work. - +kaniko is meant to be run as an image: `gcr.io/kaniko-project/executor`. We do **not** recommend running the kaniko executor binary in another image, as it might not work. We'd love to hear from you! Join us on [#kaniko Kubernetes Slack](https://kubernetes.slack.com/messages/CQDCHGX7Y/) :mega: **Please fill out our [quick 5-question survey](https://forms.gle/HhZGEM33x4FUz9Qa6)** so that we can learn how satisfied you are with Kaniko, and what improvements we should make. Thank you! :dancers: +Kaniko is not an officially supported Google project. _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md)._ From 08e968a57f6c1b618e2d1d64f622953f446a95c5 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Wed, 12 Feb 2020 13:38:25 -0800 Subject: [PATCH 36/37] add design proposal template --- .../design-proposal-template.md | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 docs/design_proposals/design-proposal-template.md diff --git a/docs/design_proposals/design-proposal-template.md b/docs/design_proposals/design-proposal-template.md new file mode 100644 index 000000000..124881249 --- /dev/null +++ b/docs/design_proposals/design-proposal-template.md @@ -0,0 +1,72 @@ +# Title + +* Author(s): \ +* Reviewers: \ + + If you are already working with someone, mention their name. + If not, please leave this empty; someone from the core team will assign it to themselves. +* Date: \ +* Status: [Reviewed/Cancelled/Under implementation/Complete] + +Here is a brief explanation of the Statuses: + +1. Reviewed: The proposal PR has been accepted, merged and ready for + implementation. +2. 
Under implementation: An accepted proposal is being implemented by actual work. + Note: The design might change in this phase based on issues during + implementation. +3. Cancelled: During or before implementation the proposal was cancelled. + It could be due to: + * other features added which made the current design proposal obsolete. + * No longer a priority. +4. Complete: This feature/change is implemented. + +## Background + +In this section, please mention and describe the new feature, redesign +or refactor. + +Please provide a brief explanation for the following questions: + +1. Why is this required? +2. If this is a redesign, what are the drawbacks of the current implementation? +3. Is there another workaround, and if so, what are its drawbacks? +4. Mention related issues, if there are any. + +Here is an example snippet for an enhancement: + +___ +Currently, Kaniko includes `build-args` when calculating layer cache key even if they are not used +in the corresponding dockerfile command. + +This causes a 100% cache miss rate even if the layer contents are the same. +Change layer caching to include `build-args` in cache key computation only if they are used in the command. +___ + +## Design + +Please describe your solution. Please list any: + +* new command line flags +* interface changes +* design assumptions + +### Open Issues/Questions + +Please list any open questions here in the following format: + +**\** + +Resolution: Please list the resolution if resolved during the design process or +specify __Not Yet Resolved__ + +## Implementation plan +As a team, we've noticed that larger PRs can go unreviewed for long periods of +time. Small incremental changes get reviewed faster and are also easier for +reviewers. +___ + + +## Integration test plan + +Please describe what new test cases you are going to consider. \ No newline at end of file From 6a85c8e89eee24e70350a1dfaef26099f8019d1c Mon Sep 17 00:00:00 2001 From: cvgw Date: Fri, 14 Feb 2020 09:15:00 -0800 Subject: [PATCH 37/37] separate travis into multiple jobs Parallelize travis by splitting the integration and unit tests into their own jobs. `make images` remains part of the integration tests. 
--- .travis.yml | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/.travis.yml b/.travis.yml index 11dcbe8fd..9c29dcebd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,14 +6,19 @@ env: go: - "1.13.3" go_import_path: github.com/GoogleContainerTools/kaniko -before_install: - - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - - - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - sudo apt-get update - - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce - - curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && sudo mv container-diff-linux-amd64 /usr/local/bin/container-diff - - docker run -d -p 5000:5000 --restart always --name registry registry:2 -script: - - make test - - make integration-test - - make images +jobs: + include: + - name: unit-test + script: + - make test + - name: integration-test + before_install: + - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - + - sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" + - sudo apt-get update + - sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce + - curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && sudo mv container-diff-linux-amd64 /usr/local/bin/container-diff + - docker run -d -p 5000:5000 --restart always --name registry registry:2 + script: + - make integration-test + - make images
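For contributors, here is a minimal local approximation of the two Travis jobs configured above, assembled only from commands already shown in DEVELOPMENT.md and .travis.yml; it assumes Docker and container-diff are installed and uses the local-registry workflow rather than GCR/GCS.

```shell
# Unit tests (roughly the "unit-test" job)
make test

# Integration tests against a local registry (roughly the "integration-test" job)
docker run -d -p 5000:5000 --restart always --name registry registry:2
export IMAGE_REPO=localhost:5000
make integration-test
make images
```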