From 254ba7be68335630ed082173194b08c38768825e Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 7 May 2020 19:31:25 -0700 Subject: [PATCH 01/26] add 64 busybox --- deploy/Dockerfile_debug | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 725b0467a..b842c9597 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -46,6 +46,9 @@ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/ +# Workaround See #https://github.com/GoogleContainerTools/kaniko/issues/656#issuecomment-564035645 +COPY --from=amd64/busybox:1.31.0 /bin/busybox /busybox/busybox + # Declare /busybox as a volume to get it automatically whitelisted VOLUME /busybox COPY files/ca-certificates.crt /kaniko/ssl/certs/ From 3dcac1b906e8001945456e3c4aace22790c8b4f4 Mon Sep 17 00:00:00 2001 From: Lukasz Jakimczuk Date: Thu, 12 Mar 2020 22:23:54 +0100 Subject: [PATCH 02/26] Resolving nested meta ARGs against themselves and build ARGs --- pkg/dockerfile/dockerfile.go | 24 ++++++++++++++++++++++++ pkg/dockerfile/dockerfile_test.go | 7 +++++-- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/pkg/dockerfile/dockerfile.go b/pkg/dockerfile/dockerfile.go index 5ca8f1eea..777178f0c 100644 --- a/pkg/dockerfile/dockerfile.go +++ b/pkg/dockerfile/dockerfile.go @@ -58,6 +58,11 @@ func ParseStages(opts *config.KanikoOptions) ([]instructions.Stage, []instructio return nil, nil, errors.Wrap(err, "parsing dockerfile") } + metaArgs, err = expandNested(metaArgs, opts.BuildArgs) + if err != nil { + return nil, nil, errors.Wrap(err, "expanding meta ARGs") + } + return stages, metaArgs, nil } @@ -97,6 +102,25 
@@ func Parse(b []byte) ([]instructions.Stage, []instructions.ArgCommand, error) { return stages, metaArgs, nil } +// expandNestedArgs tries to resolve nested ARG value against the previously defined ARGs +func expandNested(metaArgs []instructions.ArgCommand, buildArgs []string) ([]instructions.ArgCommand, error) { + prevArgs := make([]string, 0) + for i := range metaArgs { + arg := metaArgs[i] + v := arg.Value + if v != nil { + val, err := util.ResolveEnvironmentReplacement(*v, append(prevArgs, buildArgs...), false) + if err != nil { + return nil, err + } + prevArgs = append(prevArgs, arg.Key+"="+val) + arg.Value = &val + metaArgs[i] = arg + } + } + return metaArgs, nil +} + // stripEnclosingQuotes removes quotes enclosing the value of each instructions.ArgCommand in a slice // if the quotes are escaped it leaves them func stripEnclosingQuotes(metaArgs []instructions.ArgCommand) ([]instructions.ArgCommand, error) { diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go index 63c66f4b2..39204139b 100644 --- a/pkg/dockerfile/dockerfile_test.go +++ b/pkg/dockerfile/dockerfile_test.go @@ -33,6 +33,9 @@ func Test_ParseStages_ArgValueWithQuotes(t *testing.T) { dockerfile := ` ARG IMAGE="ubuntu:16.04" ARG FOO=bar + ARG HELLO="Hello" + ARG WORLD="World" + ARG NESTED="$HELLO $WORLD" FROM ${IMAGE} RUN echo hi > /hi @@ -65,11 +68,11 @@ func Test_ParseStages_ArgValueWithQuotes(t *testing.T) { t.Fatal("length of stages expected to be greater than zero, but was zero") } - if len(metaArgs) != 2 { + if len(metaArgs) != 5 { t.Fatalf("length of stage meta args expected to be 2, but was %d", len(metaArgs)) } - for i, expectedVal := range []string{"ubuntu:16.04", "bar"} { + for i, expectedVal := range []string{"ubuntu:16.04", "bar", "Hello", "World", "Hello World"} { if metaArgs[i].ValueString() != expectedVal { t.Fatalf("expected metaArg %d val to be %s but was %s", i, expectedVal, metaArgs[i].ValueString()) } From ba0cb81823879b8c824716cd45331ca8e46c294c 
Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Wed, 20 May 2020 01:00:55 -0700 Subject: [PATCH 03/26] Update Dockerfile_debug --- deploy/Dockerfile_debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index b842c9597..77821495b 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -47,7 +47,7 @@ COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux- COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/ # Workaround See #https://github.com/GoogleContainerTools/kaniko/issues/656#issuecomment-564035645 -COPY --from=amd64/busybox:1.31.0 /bin/busybox /busybox/busybox +COPY --from=amd64/busybox:1.31.1 /bin/busybox /busybox/busybox # Declare /busybox as a volume to get it automatically whitelisted VOLUME /busybox From 503aad17b7a9eee9ebc6c4a6d3f7d82f06ad0255 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Sat, 23 May 2020 14:18:02 -0700 Subject: [PATCH 04/26] Fix looping through files twice --- pkg/snapshot/snapshot.go | 64 +++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 449ae918a..05e01c6e3 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -157,56 +157,52 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { }, ) timing.DefaultRun.Stop(timer) - - resolvedFiles, err := filesystem.ResolvePaths(foundPaths, s.whitelist) - if err != nil { - return nil, nil, err - } - - resolvedMemFs := make(map[string]bool) - for _, f := range resolvedFiles { - resolvedMemFs[f] = true - } - // First handle whiteouts // Get a list of all the files that existed before this layer existingPaths := s.l.getFlattenedPathsForWhiteOut() - // Find the delta by removing everything left in this layer. 
- for p := range resolvedMemFs { - delete(existingPaths, p) + filesToAdd := []string{} + resolvedMemFs := make(map[string]bool) + + for _, path := range foundPaths { + delete(existingPaths, path) + resolvedFiles, err := filesystem.ResolvePaths([]string{path}, s.whitelist) + if err != nil { + return nil, nil, err + } + for _, path := range resolvedFiles { + // Continue if this path is already processed + if _, ok := resolvedMemFs[path]; ok { + continue + } + if util.CheckWhitelist(path) { + logrus.Tracef("Not adding %s to layer, as it's whitelisted", path) + continue + } + // Only add changed files. + fileChanged, err := s.l.CheckFileChange(path) + if err != nil { + return nil, nil, fmt.Errorf("could not check if file has changed %s %s", path, err) + } + if fileChanged { + logrus.Tracef("Adding file %s to layer, because it was changed.", path) + filesToAdd = append(filesToAdd, path) + } + } } - // The paths left here are the ones that have been deleted in this layer. + // The paths left here are the ones that have been deleted in this layer. filesToWhiteOut := []string{} for path := range existingPaths { // Only add the whiteout if the directory for the file still exists. dir := filepath.Dir(path) - if _, ok := resolvedMemFs[dir]; ok { + if _, ok := existingPaths[dir]; !ok { if s.l.MaybeAddWhiteout(path) { logrus.Debugf("Adding whiteout for %s", path) filesToWhiteOut = append(filesToWhiteOut, path) } } } - - filesToAdd := []string{} - for path := range resolvedMemFs { - if util.CheckWhitelist(path) { - logrus.Tracef("Not adding %s to layer, as it's whitelisted", path) - continue - } - // Only add changed files. 
- fileChanged, err := s.l.CheckFileChange(path) - if err != nil { - return nil, nil, fmt.Errorf("could not check if file has changed %s %s", path, err) - } - if fileChanged { - logrus.Tracef("Adding file %s to layer, because it was changed.", path) - filesToAdd = append(filesToAdd, path) - } - } - sort.Strings(filesToAdd) // Add files to the layered map for _, file := range filesToAdd { From 3462b7585b2c2854c03447e1272bb9b673297271 Mon Sep 17 00:00:00 2001 From: Ben Einaudi Date: Sun, 24 May 2020 13:04:27 +0200 Subject: [PATCH 05/26] Added integration test for multi level argument Added integration test to check that following pattern is supported ARG TAG=1.2 ARG IMAGE=my-image:${TAG} FROM ${IMAGE} ... --- integration/dockerfiles/Dockerfile_test_arg_two_level | 3 +++ integration/integration_test.go | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 integration/dockerfiles/Dockerfile_test_arg_two_level diff --git a/integration/dockerfiles/Dockerfile_test_arg_two_level b/integration/dockerfiles/Dockerfile_test_arg_two_level new file mode 100644 index 000000000..8dc9d6ad6 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_arg_two_level @@ -0,0 +1,3 @@ +ARG A=3.9 +ARG B=alpine:${A} +FROM ${B} diff --git a/integration/integration_test.go b/integration/integration_test.go index 7156591d1..3d60caa06 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -306,7 +306,7 @@ func TestGitBuildcontextSubPath(t *testing.T) { func TestBuildViaRegistryMirror(t *testing.T) { repo := getGitRepo() - dockerfile := "integration/dockerfiles/Dockerfile_registry_mirror" + dockerfile := fmt.Sprintf("%s/%s/Dockerfile_registry_mirror", integrationPath, dockerfilesPath) // Build with docker dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_registry_mirror") @@ -345,7 +345,7 @@ func TestBuildViaRegistryMirror(t *testing.T) { func TestBuildWithLabels(t *testing.T) { repo := getGitRepo() - dockerfile := 
"integration/dockerfiles/Dockerfile_test_label" + dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_label", integrationPath, dockerfilesPath) testLabel := "mylabel=myvalue" From 5d013626fc2ef97363d2e2699913490a9fd9219d Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 25 May 2020 21:21:33 -0700 Subject: [PATCH 06/26] benchmark project added --- .gitignore | 1 + integration/benchmark/Dockerfile_FS_benchmark | 9 ++ integration/benchmark/context.txt | 1 + integration/benchmark/make.sh | 10 ++ integration/benchmark_test.go | 51 ++++++++ integration/images.go | 114 ++++++++++-------- scripts/integration-test.sh | 13 -- 7 files changed, 134 insertions(+), 65 deletions(-) create mode 100644 integration/benchmark/Dockerfile_FS_benchmark create mode 100644 integration/benchmark/context.txt create mode 100755 integration/benchmark/make.sh create mode 100644 integration/benchmark_test.go diff --git a/.gitignore b/.gitignore index c58a5b6d0..d80906dc9 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ out/ .idea +*.iml diff --git a/integration/benchmark/Dockerfile_FS_benchmark b/integration/benchmark/Dockerfile_FS_benchmark new file mode 100644 index 000000000..b589757dd --- /dev/null +++ b/integration/benchmark/Dockerfile_FS_benchmark @@ -0,0 +1,9 @@ +FROM bash:4.4 + +ARG NUM +COPY context.txt . +COPY make.sh . 
+RUN ls -al make.sh +SHELL ["/usr/local/bin/bash", "-c"] +RUN ./make.sh $NUM +RUN ls -al /workdir | wc diff --git a/integration/benchmark/context.txt b/integration/benchmark/context.txt new file mode 100644 index 000000000..3b18e512d --- /dev/null +++ b/integration/benchmark/context.txt @@ -0,0 +1 @@ +hello world diff --git a/integration/benchmark/make.sh b/integration/benchmark/make.sh new file mode 100755 index 000000000..525f84e94 --- /dev/null +++ b/integration/benchmark/make.sh @@ -0,0 +1,10 @@ +#!/usr/local/bin/bash + +mkdir /workdir + +i=1 +while [ $i -le $1 ] +do + cat context.txt > /workdir/somefile$i + i=$(( $i + 1 )) +done diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go new file mode 100644 index 000000000..0092782a1 --- /dev/null +++ b/integration/benchmark_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +func TestSnapshotBenchmark(t *testing.T) { + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + contextDir := filepath.Join(cwd, "benchmark") + + nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000, 800000} + + for _, num := range nums { + + t.Run("test_benchmark"+string(num), func(t *testing.T) { + t.Parallel() + dockerfile := "Dockerfile_FS_benchmark" + kanikoImage := GetKanikoImage(config.imageRepo, dockerfile) + buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} + if _, err := buildKanikoImage("", dockerfile, + buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.serviceAccount); err != nil { + t.Errorf("could not run benchmark results for num %d", num) + } + }) + } + if err := logBenchmarks("benchmark"); err != nil { + t.Logf("Failed to create benchmark file: %v", err) + } +} diff --git a/integration/images.go b/integration/images.go index bb254f110..82e8244b6 100644 --- a/integration/images.go +++ b/integration/images.go @@ -259,70 +259,25 @@ func (d *DockerFileBuilder) BuildImageWithContext(config *integrationTestConfig, } } - reproducibleFlag := "" + additionalKanikoFlags := additionalDockerFlagsMap[dockerfile] + additionalKanikoFlags = append(additionalKanikoFlags, contextFlag, contextPath) for _, d := range reproducibleTests { if d == dockerfile { - reproducibleFlag = "--reproducible" + additionalKanikoFlags = append(additionalKanikoFlags, "--reproducible") break } } - benchmarkEnv := "BENCHMARK_FILE=false" - benchmarkDir, err := ioutil.TempDir("", "") + kanikoImage := GetKanikoImage(imageRepo, dockerfile) + out, err := buildKanikoImage(dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage, + contextDir, gcsBucket, serviceAccount) if err != nil { return err } - if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err == nil && b { - benchmarkEnv = "BENCHMARK_FILE=/kaniko/benchmarks/" 
+ dockerfile - benchmarkFile := path.Join(benchmarkDir, dockerfile) - fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile) - dst := path.Join("benchmarks", fileName) - defer UploadFileToBucket(gcsBucket, benchmarkFile, dst) - } - - // build kaniko image - additionalFlags := append(buildArgs, additionalKanikoFlagsMap[dockerfile]...) - kanikoImage := GetKanikoImage(imageRepo, dockerfile) - fmt.Printf("Going to build image with kaniko: %s, flags: %s \n", kanikoImage, additionalFlags) - - dockerRunFlags := []string{"run", "--net=host", - "-e", benchmarkEnv, - "-v", contextDir + ":/workspace", - "-v", benchmarkDir + ":/kaniko/benchmarks", - } - - if env, ok := envsMap[dockerfile]; ok { - for _, envVariable := range env { - dockerRunFlags = append(dockerRunFlags, "-e", envVariable) - } - } - - dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) - - kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile) - if dockerfilesPath == "" { - kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile") - } - - dockerRunFlags = append(dockerRunFlags, ExecutorImage, - "-f", kanikoDockerfilePath, - "-d", kanikoImage, reproducibleFlag, - contextFlag, contextPath) - dockerRunFlags = append(dockerRunFlags, additionalFlags...) - - kanikoCmd := exec.Command("docker", dockerRunFlags...) 
- - timer = timing.Start(dockerfile + "_kaniko") - out, err := RunCommandWithoutTest(kanikoCmd) - timing.DefaultRun.Stop(timer) - - if err != nil { - return fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) - } if outputCheck := outputChecks[dockerfile]; outputCheck != nil { if err := outputCheck(dockerfile, out); err != nil { - return fmt.Errorf("Output check failed for image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) + return fmt.Errorf("Output check failed for image %s with kaniko command : %s %s", kanikoImage, err, string(out)) } } @@ -435,3 +390,58 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi return nil } + +func buildKanikoImage(dockerfilesPath string, dockerfile string, buildArgs []string, kanikoArgs []string, kanikoImage string, + contextDir string, gcsBucket string, serviceAccount string) ([]byte, error) { + benchmarkEnv := "BENCHMARK_FILE=false" + benchmarkDir, err := ioutil.TempDir("", "") + if err != nil { + return nil, err + } + if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err == nil && b { + benchmarkEnv = "BENCHMARK_FILE=/kaniko/benchmarks/" + dockerfile + benchmarkFile := path.Join(benchmarkDir, dockerfile) + fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile) + dst := path.Join("benchmarks", fileName) + defer UploadFileToBucket(gcsBucket, benchmarkFile, dst) + } + + // build kaniko image + additionalFlags := append(buildArgs, kanikoArgs...) 
+ fmt.Printf("Going to build image with kaniko: %s, flags: %s \n", kanikoImage, additionalFlags) + + dockerRunFlags := []string{"run", "--net=host", + "-e", benchmarkEnv, + "-v", contextDir + ":/workspace", + "-v", benchmarkDir + ":/kaniko/benchmarks", + } + + if env, ok := envsMap[dockerfile]; ok { + for _, envVariable := range env { + dockerRunFlags = append(dockerRunFlags, "-e", envVariable) + } + } + + dockerRunFlags = addServiceAccountFlags(dockerRunFlags, serviceAccount) + + kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile) + if dockerfilesPath == "" { + kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile") + } + + dockerRunFlags = append(dockerRunFlags, ExecutorImage, + "-f", kanikoDockerfilePath, + "-d", kanikoImage) + dockerRunFlags = append(dockerRunFlags, additionalFlags...) + + kanikoCmd := exec.Command("docker", dockerRunFlags...) + + timer := timing.Start(dockerfile + "_kaniko") + out, err := RunCommandWithoutTest(kanikoCmd) + timing.DefaultRun.Stop(timer) + + if err != nil { + return nil, fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) + } + return out, nil +} diff --git a/scripts/integration-test.sh b/scripts/integration-test.sh index 3af24c922..07ac75bdb 100755 --- a/scripts/integration-test.sh +++ b/scripts/integration-test.sh @@ -21,19 +21,6 @@ IMAGE_REPO="${IMAGE_REPO:-gcr.io/kaniko-test}" docker version # Sets up a kokoro (Google internal integration testing tool) environment -if [ -f "$KOKORO_GFILE_DIR"/common.sh ]; then - echo "Installing dependencies..." - source "$KOKORO_GFILE_DIR/common.sh" - mkdir -p /usr/local/go/src/github.com/GoogleContainerTools/ - cp -r github/kaniko /usr/local/go/src/github.com/GoogleContainerTools/ - pushd /usr/local/go/src/github.com/GoogleContainerTools/kaniko - echo "Installing container-diff..." 
- mv $KOKORO_GFILE_DIR/container-diff-linux-amd64 $KOKORO_GFILE_DIR/container-diff - chmod +x $KOKORO_GFILE_DIR/container-diff - export PATH=$PATH:$KOKORO_GFILE_DIR - cp $KOKORO_ROOT/src/keystore/72508_gcr_application_creds $HOME/.config/gcloud/application_default_credentials.json -fi - echo "Running integration tests..." make out/executor make out/warmer From 4129c17d12c0a39d6f9b721943d76a1aed42f403 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Mon, 25 May 2020 23:36:59 -0700 Subject: [PATCH 07/26] more changes --- .../Dockerfile_fs_benchmark} | 0 .../{benchmark => benchmark_fs}/context.txt | 0 .../{benchmark => benchmark_fs}/make.sh | 0 integration/benchmark_test.go | 78 +++++++++++++++---- integration/images.go | 52 +++++++------ 5 files changed, 94 insertions(+), 36 deletions(-) rename integration/{benchmark/Dockerfile_FS_benchmark => benchmark_fs/Dockerfile_fs_benchmark} (100%) rename integration/{benchmark => benchmark_fs}/context.txt (100%) rename integration/{benchmark => benchmark_fs}/make.sh (100%) diff --git a/integration/benchmark/Dockerfile_FS_benchmark b/integration/benchmark_fs/Dockerfile_fs_benchmark similarity index 100% rename from integration/benchmark/Dockerfile_FS_benchmark rename to integration/benchmark_fs/Dockerfile_fs_benchmark diff --git a/integration/benchmark/context.txt b/integration/benchmark_fs/context.txt similarity index 100% rename from integration/benchmark/context.txt rename to integration/benchmark_fs/context.txt diff --git a/integration/benchmark/make.sh b/integration/benchmark_fs/make.sh similarity index 100% rename from integration/benchmark/make.sh rename to integration/benchmark_fs/make.sh diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 0092782a1..a5e1d08be 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -17,35 +17,85 @@ limitations under the License. 
package integration import ( + "encoding/json" "fmt" + "io/ioutil" "os" "path/filepath" + "sync" "testing" + "time" ) +type result struct { + totalBuildTime float64 + resolvingFiles float64 + walkingFiles float64 +} + func TestSnapshotBenchmark(t *testing.T) { cwd, err := os.Getwd() if err != nil { t.Fatal(err) } - contextDir := filepath.Join(cwd, "benchmark") + contextDir := filepath.Join(cwd, "benchmark_fs") nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000, 800000} + var timeMap sync.Map + var wg sync.WaitGroup for _, num := range nums { - - t.Run("test_benchmark"+string(num), func(t *testing.T) { - t.Parallel() - dockerfile := "Dockerfile_FS_benchmark" - kanikoImage := GetKanikoImage(config.imageRepo, dockerfile) - buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} - if _, err := buildKanikoImage("", dockerfile, - buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.serviceAccount); err != nil { - t.Errorf("could not run benchmark results for num %d", num) - } + t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) { + wg.Add(1) + go func(num int) { + dockerfile := "Dockerfile_fs_benchmark" + kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num) + buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} + benchmarkDir, err := buildKanikoImage("", dockerfile, + buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, + config.serviceAccount, false) + if err != nil { + t.Errorf("could not run benchmark results for num %d", num) + } + r := newResult(t, filepath.Join(benchmarkDir, dockerfile)) + timeMap.Store(num, r) + wg.Done() + defer os.Remove(benchmarkDir) + }(num) }) } - if err := logBenchmarks("benchmark"); err != nil { - t.Logf("Failed to create benchmark file: %v", err) - } + wg.Wait() + + fmt.Println("Number of Files,Total Build Time,Walking Filesystem, Resolving Files") + timeMap.Range(func(key interface{}, value interface{}) bool { + d, _ := 
key.(int) + v, _ := value.(result) + fmt.Println(fmt.Sprintf("%d,%f,%f,%f", d, v.totalBuildTime, v.walkingFiles, v.resolvingFiles)) + return true + }) + +} + +func newResult(t *testing.T, f string) result { + var current map[string]time.Duration + jsonFile, err := os.Open(f) + defer jsonFile.Close() + if err != nil { + t.Errorf("could not read benchmark file %s", f) + } + byteValue, _ := ioutil.ReadAll(jsonFile) + if err := json.Unmarshal(byteValue, ¤t); err != nil { + t.Errorf("could not unmarshal benchmark file") + } + r := result{} + if c, ok := current["Resolving Paths"]; ok { + r.resolvingFiles = c.Seconds() + } + if c, ok := current["Walking filesystem"]; ok { + r.walkingFiles = c.Seconds() + } + if c, ok := current["Total Build Time"]; ok { + r.totalBuildTime = c.Seconds() + } + return r } diff --git a/integration/images.go b/integration/images.go index 82e8244b6..0d2d63e70 100644 --- a/integration/images.go +++ b/integration/images.go @@ -269,17 +269,12 @@ func (d *DockerFileBuilder) BuildImageWithContext(config *integrationTestConfig, } kanikoImage := GetKanikoImage(imageRepo, dockerfile) - out, err := buildKanikoImage(dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage, - contextDir, gcsBucket, serviceAccount) - if err != nil { + timer = timing.Start(dockerfile + "_kaniko") + if _, err := buildKanikoImage(dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage, + contextDir, gcsBucket, serviceAccount, true); err != nil { return err } - - if outputCheck := outputChecks[dockerfile]; outputCheck != nil { - if err := outputCheck(dockerfile, out); err != nil { - return fmt.Errorf("Output check failed for image %s with kaniko command : %s %s", kanikoImage, err, string(out)) - } - } + timing.DefaultRun.Stop(timer) d.filesBuilt[dockerfile] = struct{}{} @@ -336,9 +331,7 @@ func (d *DockerFileBuilder) buildCachedImages(config *integrationTestConfig, cac "--cache-dir", cacheDir) kanikoCmd := exec.Command("docker", 
dockerRunFlags...) - timer := timing.Start(dockerfile + "_kaniko_cached_" + strconv.Itoa(version)) _, err := RunCommandWithoutTest(kanikoCmd) - timing.DefaultRun.Stop(timer) if err != nil { return fmt.Errorf("Failed to build cached image %s with kaniko command \"%s\": %s", kanikoImage, kanikoCmd.Args, err) } @@ -391,19 +384,30 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi return nil } -func buildKanikoImage(dockerfilesPath string, dockerfile string, buildArgs []string, kanikoArgs []string, kanikoImage string, - contextDir string, gcsBucket string, serviceAccount string) ([]byte, error) { +func buildKanikoImage( + dockerfilesPath string, + dockerfile string, + buildArgs []string, + kanikoArgs []string, + kanikoImage string, + contextDir string, + gcsBucket string, + serviceAccount string, + shdUpload bool, +) (string, error) { benchmarkEnv := "BENCHMARK_FILE=false" benchmarkDir, err := ioutil.TempDir("", "") if err != nil { - return nil, err + return "", err } if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err == nil && b { benchmarkEnv = "BENCHMARK_FILE=/kaniko/benchmarks/" + dockerfile - benchmarkFile := path.Join(benchmarkDir, dockerfile) - fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile) - dst := path.Join("benchmarks", fileName) - defer UploadFileToBucket(gcsBucket, benchmarkFile, dst) + if shdUpload { + benchmarkFile := path.Join(benchmarkDir, dockerfile) + fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile) + dst := path.Join("benchmarks", fileName) + defer UploadFileToBucket(gcsBucket, benchmarkFile, dst) + } } // build kaniko image @@ -426,7 +430,7 @@ func buildKanikoImage(dockerfilesPath string, dockerfile string, buildArgs []str kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile) if dockerfilesPath == "" { - kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile") + kanikoDockerfilePath = 
path.Join(buildContextPath, dockerfile) } dockerRunFlags = append(dockerRunFlags, ExecutorImage, @@ -439,9 +443,13 @@ func buildKanikoImage(dockerfilesPath string, dockerfile string, buildArgs []str timer := timing.Start(dockerfile + "_kaniko") out, err := RunCommandWithoutTest(kanikoCmd) timing.DefaultRun.Stop(timer) - if err != nil { - return nil, fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) + return "", fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) } - return out, nil + if outputCheck := outputChecks[dockerfile]; outputCheck != nil { + if err := outputCheck(dockerfile, out); err != nil { + return "", fmt.Errorf("Output check failed for image %s with kaniko command : %s %s", kanikoImage, err, string(out)) + } + } + return benchmarkDir, nil } From 48421f1126b5bc7067535d90559401c47f13670d Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 26 May 2020 00:16:03 -0700 Subject: [PATCH 08/26] more fixes --- integration/benchmark_test.go | 13 +++++++++---- integration/images.go | 2 -- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index a5e1d08be..2b236fa54 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -47,21 +47,26 @@ func TestSnapshotBenchmark(t *testing.T) { for _, num := range nums { t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) { wg.Add(1) - go func(num int) { + var err error + go func(num int, err error) { dockerfile := "Dockerfile_fs_benchmark" kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num) buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} - benchmarkDir, err := buildKanikoImage("", dockerfile, + var benchmarkDir string + benchmarkDir, err = buildKanikoImage("", dockerfile, buildArgs, []string{}, kanikoImage, contextDir, 
config.gcsBucket, config.serviceAccount, false) if err != nil { - t.Errorf("could not run benchmark results for num %d", num) + return } r := newResult(t, filepath.Join(benchmarkDir, dockerfile)) timeMap.Store(num, r) wg.Done() defer os.Remove(benchmarkDir) - }(num) + }(num, err) + if err != nil { + t.Errorf("could not run benchmark results for num %d due to %s", num, err) + } }) } wg.Wait() diff --git a/integration/images.go b/integration/images.go index 0d2d63e70..476012f1e 100644 --- a/integration/images.go +++ b/integration/images.go @@ -440,9 +440,7 @@ func buildKanikoImage( kanikoCmd := exec.Command("docker", dockerRunFlags...) - timer := timing.Start(dockerfile + "_kaniko") out, err := RunCommandWithoutTest(kanikoCmd) - timing.DefaultRun.Stop(timer) if err != nil { return "", fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s %s", kanikoImage, kanikoCmd.Args, err, string(out)) } From a60a097c9b562f7263b0556b7dc7277fdac8d106 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 26 May 2020 00:22:50 -0700 Subject: [PATCH 09/26] one more fix --- integration/images.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/images.go b/integration/images.go index 476012f1e..97f3b6fec 100644 --- a/integration/images.go +++ b/integration/images.go @@ -259,7 +259,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(config *integrationTestConfig, } } - additionalKanikoFlags := additionalDockerFlagsMap[dockerfile] + additionalKanikoFlags := additionalKanikoFlagsMap[dockerfile] additionalKanikoFlags = append(additionalKanikoFlags, contextFlag, contextPath) for _, d := range reproducibleTests { if d == dockerfile { From b98f55a41d2a4d1950ccdb5d695a0c8b36881533 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 26 May 2020 08:40:25 -0700 Subject: [PATCH 10/26] some more fixes --- integration/benchmark_fs/Dockerfile_fs_benchmark | 1 - integration/benchmark_test.go | 12 ++++++++---- 2 files changed, 8 insertions(+), 5 
deletions(-) diff --git a/integration/benchmark_fs/Dockerfile_fs_benchmark b/integration/benchmark_fs/Dockerfile_fs_benchmark index b589757dd..4065467c4 100644 --- a/integration/benchmark_fs/Dockerfile_fs_benchmark +++ b/integration/benchmark_fs/Dockerfile_fs_benchmark @@ -3,7 +3,6 @@ FROM bash:4.4 ARG NUM COPY context.txt . COPY make.sh . -RUN ls -al make.sh SHELL ["/usr/local/bin/bash", "-c"] RUN ./make.sh $NUM RUN ls -al /workdir | wc diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 2b236fa54..036e96beb 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "os" "path/filepath" + "strconv" "sync" "testing" "time" @@ -34,6 +35,9 @@ type result struct { } func TestSnapshotBenchmark(t *testing.T) { + if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err != nil || !b { + t.SkipNow() + } cwd, err := os.Getwd() if err != nil { t.Fatal(err) @@ -48,22 +52,22 @@ func TestSnapshotBenchmark(t *testing.T) { t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) { wg.Add(1) var err error - go func(num int, err error) { + go func(num int, err *error) { dockerfile := "Dockerfile_fs_benchmark" kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num) buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} var benchmarkDir string - benchmarkDir, err = buildKanikoImage("", dockerfile, + benchmarkDir, *err = buildKanikoImage("", dockerfile, buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.serviceAccount, false) - if err != nil { + if *err != nil { return } r := newResult(t, filepath.Join(benchmarkDir, dockerfile)) timeMap.Store(num, r) wg.Done() defer os.Remove(benchmarkDir) - }(num, err) + }(num, &err) if err != nil { t.Errorf("could not run benchmark results for num %d due to %s", num, err) } From 362c8dd5196cba3cd872c09cea490bbaf92c75a8 Mon Sep 17 00:00:00 2001 From: Gabriel Virga Date: Wed, 27 May 2020 
17:51:44 -0400 Subject: [PATCH 11/26] Instructions to run kaniko using kubectl and STDIN Example on README of how to run kaniko using kubectl and standard input --- README.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/README.md b/README.md index 118d34038..5e0b7bdb7 100644 --- a/README.md +++ b/README.md @@ -192,6 +192,48 @@ echo -e 'FROM alpine \nRUN echo "created from standard input"' > Dockerfile | ta --destination= ``` +Complete example of how to interactively run kaniko with `.tar.gz` Standard Input data, using Kubernetes command line with a temporary container and completely dockerless: +```shell +echo -e 'FROM alpine \nRUN echo "created from standard input"' > Dockerfile | tar -cf - Dockerfile | gzip -9 | kubectl run kaniko \ +--rm --stdin=true \ +--image=gcr.io/kaniko-project/executor:latest --restart=Never \ +--overrides='{ + "apiVersion": "v1", + "spec": { + "containers": [ + { + "name": "kaniko", + "image": "gcr.io/kaniko-project/executor:latest", + "stdin": true, + "stdinOnce": true, + "args": [ + "--dockerfile=Dockerfile", + "--context=tar://stdin", + "--destination=gcr.io/my-repo/my-image" ], + "volumeMounts": [ + { + "name": "cabundle", + "mountPath": "/kaniko/ssl/certs/" + }, + { + "name": "docker-config", + "mountPath": "/kaniko/.docker/" + }] + }], + "volumes": [ + { + "name": "cabundle", + "configMap": { + "name": "cabundle"}}, + { + "name": "docker-config", + "configMap": { + "name": "docker-config" }} + ] + } +}' +``` + ### Running kaniko There are several different ways to deploy and run kaniko: From cbf3073fdaf499519a7caacfb578ce1f5c85746e Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 2 Jun 2020 15:56:27 -0700 Subject: [PATCH 12/26] rename whitelist to ignorelist --- cmd/executor/cmd/root.go | 6 +- deploy/Dockerfile_debug | 2 +- .../filesystem-resolution-proposal-01.md | 2 +- integration/dockerfiles/Dockerfile_test_add | 2 +- .../dockerfiles/Dockerfile_test_multistage | 2 +- 
integration/integration_test.go | 8 +- pkg/commands/volume.go | 2 +- pkg/config/init.go | 4 +- pkg/config/options.go | 2 +- pkg/constants/constants.go | 2 +- pkg/dockerfile/dockerfile_test.go | 2 +- pkg/executor/build.go | 4 +- pkg/executor/composite_cache.go | 2 +- pkg/executor/copy_multistage_test.go | 6 +- pkg/filesystem/resolve.go | 20 +-- pkg/filesystem/resolve_test.go | 14 +-- pkg/snapshot/snapshot.go | 24 ++-- pkg/util/fs_util.go | 84 ++++++------- pkg/util/fs_util_test.go | 114 +++++++++--------- 19 files changed, 151 insertions(+), 151 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 36cd3fdeb..a31cd0168 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -83,8 +83,8 @@ var RootCmd = &cobra.Command{ if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" { return errors.New("You must provide --destination if setting ImageNameDigestFile") } - // Update whitelisted paths - util.UpdateWhitelist(opts.WhitelistVarRun) + // Update skipped paths + util.UpdateInitialIgnoreList(opts.SkipVarRun) } return nil }, @@ -160,7 +160,7 @@ func addKanikoOptionsFlags() { opts.RegistriesCertificates = make(map[string]string) RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. Expected format is 'my.registry.url=/path/to/the/server/certificate'.") RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use has pull-through cache instead of docker.io.") - RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).") + RootCmd.PersistentFlags().BoolVarP(&opts.SkipVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. 
Set it to false to preserve /var/run/ in destination image. (Default true).") RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. Set it repeatedly for multiple labels.") RootCmd.PersistentFlags().BoolVarP(&opts.SkipUnusedStages, "skip-unused-stages", "", false, "Build only used stages if defined to true. Otherwise it builds by default all stages, even the unnecessaries ones until it reaches the target stage / end of Dockerfile") } diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 4480cf00c..66ca8d4b5 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -46,7 +46,7 @@ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/ -# Declare /busybox as a volume to get it automatically whitelisted +# Declare /busybox as a volume to get it automatically in the path to skip VOLUME /busybox COPY files/ca-certificates.crt /kaniko/ssl/certs/ COPY --from=0 /kaniko/.docker /kaniko/.docker diff --git a/docs/design_proposals/filesystem-resolution-proposal-01.md b/docs/design_proposals/filesystem-resolution-proposal-01.md index e3f87008d..89bdf2a34 100644 --- a/docs/design_proposals/filesystem-resolution-proposal-01.md +++ b/docs/design_proposals/filesystem-resolution-proposal-01.md @@ -19,7 +19,7 @@ To accomplish this, Kaniko walks the entire filesystem to discover every object. Some of these objects may actually be a symlink to another object in the filesystem; in these cases we must consider both the link and the target object. -Kaniko also maintains a set of whitelisted (aka ignored) filepaths. Any object +Kaniko also maintains a set of ignored (aka skipped) filepaths. 
Any object which matches one of these filepaths should be ignored by kaniko. This results in a 3 dimensional search space diff --git a/integration/dockerfiles/Dockerfile_test_add b/integration/dockerfiles/Dockerfile_test_add index dfb748d8a..02596b0d3 100644 --- a/integration/dockerfiles/Dockerfile_test_add +++ b/integration/dockerfiles/Dockerfile_test_add @@ -14,7 +14,7 @@ ADD $contextenv/* /tmp/${contextenv}/ ADD context/tars/fil* /tars/ ADD context/tars/file.tar /tars_again -# This tar has some directories that should be whitelisted inside it. +# This tar has some directories that should be ignored inside it. ADD context/tars/sys.tar.gz / diff --git a/integration/dockerfiles/Dockerfile_test_multistage b/integration/dockerfiles/Dockerfile_test_multistage index df8f05aa5..56768cb58 100644 --- a/integration/dockerfiles/Dockerfile_test_multistage +++ b/integration/dockerfiles/Dockerfile_test_multistage @@ -13,7 +13,7 @@ FROM base as fourth RUN date > /date ENV foo bar -# This base image contains symlinks with relative paths to whitelisted directories +# This base image contains symlinks with relative paths to ignored directories # We need to test they're extracted correctly FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5aa48 diff --git a/integration/integration_test.go b/integration/integration_test.go index 7156591d1..979274eb1 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -556,7 +556,7 @@ func checkContainerDiffOutput(t *testing.T, diff []byte, expected string) { t.Error(err) } - // Some differences (whitelisted paths, etc.) are known and expected. + // Some differences (ignored paths, etc.) are known and expected. 
fdr := diffInt[0].Diff.(*fileDiffResult) fdr.Adds = filterFileDiff(fdr.Adds) fdr.Dels = filterFileDiff(fdr.Dels) @@ -588,14 +588,14 @@ func filterMetaDiff(metaDiff []string) []string { func filterFileDiff(f []fileDiff) []fileDiff { var newDiffs []fileDiff for _, diff := range f { - isWhitelisted := false + isSkipped := false for _, p := range allowedDiffPaths { if util.HasFilepathPrefix(diff.Name, p, false) { - isWhitelisted = true + isSkipped = true break } } - if !isWhitelisted { + if !isSkipped { newDiffs = append(newDiffs, diff) } } diff --git a/pkg/commands/volume.go b/pkg/commands/volume.go index 476a2c52a..94acd09e1 100644 --- a/pkg/commands/volume.go +++ b/pkg/commands/volume.go @@ -48,7 +48,7 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile. for _, volume := range resolvedVolumes { var x struct{} existingVolumes[volume] = x - util.AddVolumePathToWhitelist(volume) + util.AddVolumePathToIgnoreList(volume) // Only create and snapshot the dir if it didn't exist already if _, err := os.Stat(volume); os.IsNotExist(err) { diff --git a/pkg/config/init.go b/pkg/config/init.go index 3fca483f7..2e08b975c 100644 --- a/pkg/config/init.go +++ b/pkg/config/init.go @@ -22,10 +22,10 @@ import ( var RootDir string var KanikoDir string -var WhitelistPath string +var IgnoreListPath string func init() { RootDir = constants.RootDir KanikoDir = constants.KanikoDir - WhitelistPath = constants.WhitelistPath + IgnoreListPath = constants.IgnoreListPath } diff --git a/pkg/config/options.go b/pkg/config/options.go index 6c8690284..7732f206f 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -55,7 +55,7 @@ type KanikoOptions struct { NoPush bool Cache bool Cleanup bool - WhitelistVarRun bool + SkipVarRun bool SkipUnusedStages bool } diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index 8dd5893ea..aaa0c815a 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -26,7 +26,7 @@ const ( //KanikoDir 
is the path to the Kaniko directory KanikoDir = "/kaniko" - WhitelistPath = "/proc/self/mountinfo" + IgnoreListPath = "/proc/self/mountinfo" Author = "kaniko" diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go index 63c66f4b2..79cb87291 100644 --- a/pkg/dockerfile/dockerfile_test.go +++ b/pkg/dockerfile/dockerfile_test.go @@ -565,7 +565,7 @@ func Test_SkipingUnusedStages(t *testing.T) { # Make sure that we snapshot intermediate images correctly RUN date > /date ENV foo bar - # This base image contains symlinks with relative paths to whitelisted directories + # This base image contains symlinks with relative paths to ignored directories # We need to test they're extracted correctly FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5aa48 FROM fourth diff --git a/pkg/executor/build.go b/pkg/executor/build.go index 761c09e72..d05ddecaa 100644 --- a/pkg/executor/build.go +++ b/pkg/executor/build.go @@ -314,8 +314,8 @@ func (s *stageBuilder) build() error { logrus.Info("Skipping unpacking as no commands require it.") } - if err := util.DetectFilesystemWhitelist(config.WhitelistPath); err != nil { - return errors.Wrap(err, "failed to check filesystem whitelist") + if err := util.DetectFilesystemIgnoreList(config.IgnoreListPath); err != nil { + return errors.Wrap(err, "failed to check filesystem mount paths") } initSnapshotTaken := false diff --git a/pkg/executor/composite_cache.go b/pkg/executor/composite_cache.go index d8ebfda94..d6a624481 100644 --- a/pkg/executor/composite_cache.go +++ b/pkg/executor/composite_cache.go @@ -69,7 +69,7 @@ func (s *CompositeCache) AddPath(p, context string) error { } // Only add the hash of this directory to the key - // if there is any whitelisted content. + // if there is any ignored content. 
if !empty || !util.ExcludeFile(p, context) { s.keys = append(s.keys, k) } diff --git a/pkg/executor/copy_multistage_test.go b/pkg/executor/copy_multistage_test.go index f28e6f16e..186d099ad 100644 --- a/pkg/executor/copy_multistage_test.go +++ b/pkg/executor/copy_multistage_test.go @@ -166,7 +166,7 @@ func setupMultistageTests(t *testing.T) (string, func()) { // set up config config.RootDir = testDir config.KanikoDir = fmt.Sprintf("%s/%s", testDir, "kaniko") - // Write a whitelist path + // Write path to ignore list if err := os.MkdirAll(filepath.Join(testDir, "proc"), 0755); err != nil { t.Fatal(err) } @@ -178,10 +178,10 @@ func setupMultistageTests(t *testing.T) (string, func()) { if err := ioutil.WriteFile(mFile, []byte(mountInfo), 0644); err != nil { t.Fatal(err) } - config.WhitelistPath = mFile + config.IgnoreListPath = mFile return testDir, func() { config.KanikoDir = constants.KanikoDir config.RootDir = constants.RootDir - config.WhitelistPath = constants.WhitelistPath + config.IgnoreListPath = constants.IgnoreListPath } } diff --git a/pkg/filesystem/resolve.go b/pkg/filesystem/resolve.go index ab80d0eb2..64121146a 100644 --- a/pkg/filesystem/resolve.go +++ b/pkg/filesystem/resolve.go @@ -26,24 +26,24 @@ import ( "github.com/sirupsen/logrus" ) -// ResolvePaths takes a slice of file paths and a slice of whitelist entries. It resolve each +// ResolvePaths takes a slice of file paths and a list of skipped file paths. It resolve each // file path according to a set of rules and then returns a slice of resolved paths or error. // File paths are resolved according to the following rules: -// * If path is whitelisted, skip it. +// * If path is in ignorelist, skip it. // * If path is a symlink, resolve it's ancestor link and add it to the output set. -// * If path is a symlink, resolve it's target. If the target is not whitelisted add it to the +// * If path is a symlink, resolve it's target. If the target is not ignored add it to the // output set. 
// * Add all ancestors of each path to the output set. -func ResolvePaths(paths []string, wl []util.WhitelistEntry) (pathsToAdd []string, err error) { +func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) { logrus.Infof("Resolving %d paths", len(paths)) logrus.Tracef("Resolving paths %s", paths) fileSet := make(map[string]bool) for _, f := range paths { - // If the given path is part of the whitelist ignore it - if util.IsInProvidedWhitelist(f, wl) { - logrus.Debugf("path %s is whitelisted, ignoring it", f) + // If the given path is part of the ignorelist ignore it + if util.IsInProvidedIgnoreList(f, wl) { + logrus.Debugf("path %s is in list to ignore, ignoring it", f) continue } @@ -76,10 +76,10 @@ func ResolvePaths(paths []string, wl []util.WhitelistEntry) (pathsToAdd []string continue } - // If the given path is a symlink and the target is part of the whitelist + // If the given path is a symlink and the target is part of the ignorelist // ignore the target - if util.IsInProvidedWhitelist(evaled, wl) { - logrus.Debugf("path %s is whitelisted, ignoring it", evaled) + if util.IsInProvidedIgnoreList(evaled, wl) { + logrus.Debugf("path %s is ignored, ignoring it", evaled) continue } diff --git a/pkg/filesystem/resolve_test.go b/pkg/filesystem/resolve_test.go index af402a86e..368aee1d2 100644 --- a/pkg/filesystem/resolve_test.go +++ b/pkg/filesystem/resolve_test.go @@ -85,8 +85,8 @@ func Test_ResolvePaths(t *testing.T) { } } - t.Run("none are whitelisted", func(t *testing.T) { - wl := []util.WhitelistEntry{} + t.Run("none are ignored", func(t *testing.T) { + wl := []util.IgnoreListEntry{} inputFiles := []string{} expectedFiles := []string{} @@ -107,8 +107,8 @@ func Test_ResolvePaths(t *testing.T) { validateResults(t, files, expectedFiles, err) }) - t.Run("some are whitelisted", func(t *testing.T) { - wl := []util.WhitelistEntry{ + t.Run("some are ignored", func(t *testing.T) { + wl := []util.IgnoreListEntry{ { Path: 
filepath.Join(dir, "link", "baz"), }, @@ -124,7 +124,7 @@ func Test_ResolvePaths(t *testing.T) { link := filepath.Join(dir, "link", f) inputFiles = append(inputFiles, link) - if util.IsInProvidedWhitelist(link, wl) { + if util.IsInProvidedIgnoreList(link, wl) { t.Logf("skipping %s", link) continue } @@ -133,7 +133,7 @@ func Test_ResolvePaths(t *testing.T) { target := filepath.Join(dir, "target", f) - if util.IsInProvidedWhitelist(target, wl) { + if util.IsInProvidedIgnoreList(target, wl) { t.Logf("skipping %s", target) continue } @@ -177,7 +177,7 @@ func Test_ResolvePaths(t *testing.T) { inputFiles := []string{} expectedFiles := []string{} - wl := []util.WhitelistEntry{} + wl := []util.IgnoreListEntry{} files, err := ResolvePaths(inputFiles, wl) diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go index 6415af870..0a887d303 100644 --- a/pkg/snapshot/snapshot.go +++ b/pkg/snapshot/snapshot.go @@ -39,14 +39,14 @@ var snapshotPathPrefix = config.KanikoDir // Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken type Snapshotter struct { - l *LayeredMap - directory string - whitelist []util.WhitelistEntry + l *LayeredMap + directory string + ignorelist []util.IgnoreListEntry } // NewSnapshotter creates a new snapshotter rooted at d func NewSnapshotter(l *LayeredMap, d string) *Snapshotter { - return &Snapshotter{l: l, directory: d, whitelist: util.Whitelist()} + return &Snapshotter{l: l, directory: d, ignorelist: util.IgnoreList()} } // Init initializes a new snapshotter @@ -60,7 +60,7 @@ func (s *Snapshotter) Key() (string, error) { return s.l.Key() } -// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates +// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the ignorelist, and creates // a tarball of the changed files. 
Return contents of the tarball, and whether or not any files were changed func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { f, err := ioutil.TempFile(config.KanikoDir, "") @@ -75,7 +75,7 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { return "", nil } - filesToAdd, err := filesystem.ResolvePaths(files, s.whitelist) + filesToAdd, err := filesystem.ResolvePaths(files, s.ignorelist) if err != nil { return "", nil } @@ -100,7 +100,7 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { return f.Name(), nil } -// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates +// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the ignorelist, and creates // a tarball of the changed files. func (s *Snapshotter) TakeSnapshotFS() (string, error) { f, err := ioutil.TempFile(snapshotPathPrefix, "") @@ -139,9 +139,9 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { godirwalk.Walk(s.directory, &godirwalk.Options{ Callback: func(path string, ent *godirwalk.Dirent) error { - if util.IsInWhitelist(path) { + if util.IsInIgnoreList(path) { if util.IsDestDir(path) { - logrus.Tracef("Skipping paths under %s, as it is a whitelisted directory", path) + logrus.Tracef("Skipping paths under %s, as it is a ignored directory", path) return filepath.SkipDir } @@ -159,7 +159,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { timing.DefaultRun.Stop(timer) timer = timing.Start("Resolving Paths") - resolvedFiles, err := filesystem.ResolvePaths(foundPaths, s.whitelist) + resolvedFiles, err := filesystem.ResolvePaths(foundPaths, s.ignorelist) if err != nil { return nil, nil, err } @@ -193,8 +193,8 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) { filesToAdd := []string{} for path := range resolvedMemFs { - if util.CheckWhitelist(path) { - logrus.Tracef("Not adding %s to layer, as it's 
whitelisted", path) + if util.CheckIgnoreList(path) { + logrus.Tracef("Not adding %s to layer, as it's ignored", path) continue } // Only add changed files. diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 67a3f1d87..e390c7e3a 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -44,31 +44,31 @@ import ( const DoNotChangeUID = -1 const DoNotChangeGID = -1 -type WhitelistEntry struct { +type IgnoreListEntry struct { Path string PrefixMatchOnly bool } -var initialWhitelist = []WhitelistEntry{ +var initialIgnoreList = []IgnoreListEntry{ { Path: config.KanikoDir, PrefixMatchOnly: false, }, { - // similarly, we whitelist /etc/mtab, since there is no way to know if the file was mounted or came + // similarly, we skip /etc/mtab, since there is no way to know if the file was mounted or came // from the base image Path: "/etc/mtab", PrefixMatchOnly: false, }, { - // we whitelist /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory. + // we skip /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory. 
// from the base image Path: "/tmp/apt-key-gpghome", PrefixMatchOnly: true, }, } -var whitelist = initialWhitelist +var ignorelist = initialIgnoreList var volumes = []string{} @@ -84,8 +84,8 @@ type FSConfig struct { type FSOpt func(*FSConfig) -func Whitelist() []WhitelistEntry { - return whitelist +func IgnoreList() []IgnoreListEntry { + return ignorelist } func IncludeWhiteout() FSOpt { @@ -126,11 +126,11 @@ func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, e return nil, errors.New("must supply an extract function") } - if err := DetectFilesystemWhitelist(config.WhitelistPath); err != nil { + if err := DetectFilesystemIgnoreList(config.IgnoreListPath); err != nil { return nil, err } - logrus.Debugf("Mounted directories: %v", whitelist) + logrus.Debugf("Mounted directories: %v", ignorelist) extractedFiles := []string{} for i, l := range layers { @@ -195,19 +195,19 @@ func DeleteFilesystem() error { return nil } - if CheckWhitelist(path) { + if CheckIgnoreList(path) { if !isExist(path) { - logrus.Debugf("Path %s whitelisted, but not exists", path) + logrus.Debugf("Path %s ignored, but not exists", path) return nil } if info.IsDir() { return filepath.SkipDir } - logrus.Debugf("Not deleting %s, as it's whitelisted", path) + logrus.Debugf("Not deleting %s, as it's ignored", path) return nil } - if childDirInWhitelist(path) { - logrus.Debugf("Not deleting %s, as it contains a whitelisted path", path) + if childDirInIgnoreList(path) { + logrus.Debugf("Not deleting %s, as it contains a ignored path", path) return nil } if path == config.RootDir { @@ -225,9 +225,9 @@ func isExist(path string) bool { return false } -// ChildDirInWhitelist returns true if there is a child file or directory of the path in the whitelist -func childDirInWhitelist(path string) bool { - for _, d := range whitelist { +// childDirInIgnoreList returns true if there is a child file or directory of the path in the ignorelist +func childDirInIgnoreList(path string) bool { 
+ for _, d := range ignorelist { if HasFilepathPrefix(d.Path, path, d.PrefixMatchOnly) { return true } @@ -268,8 +268,8 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { return err } - if CheckWhitelist(abs) && !checkWhitelistRoot(dest) { - logrus.Debugf("Not adding %s because it is whitelisted", path) + if CheckIgnoreList(abs) && !checkIgnoreListRoot(dest) { + logrus.Debugf("Not adding %s because it is ignored", path) return nil } switch hdr.Typeflag { @@ -325,8 +325,8 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { if err != nil { return err } - if CheckWhitelist(abs) { - logrus.Tracef("skipping symlink from %s to %s because %s is whitelisted", hdr.Linkname, path, hdr.Linkname) + if CheckIgnoreList(abs) { + logrus.Tracef("skipping symlink from %s to %s because %s is ignored", hdr.Linkname, path, hdr.Linkname) return nil } // The base directory for a link may not exist before it is created. @@ -365,11 +365,11 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error { return nil } -func IsInWhitelist(path string) bool { - return IsInProvidedWhitelist(path, whitelist) +func IsInIgnoreList(path string) bool { + return IsInProvidedIgnoreList(path, ignorelist) } -func IsInProvidedWhitelist(path string, wl []WhitelistEntry) bool { +func IsInProvidedIgnoreList(path string, wl []IgnoreListEntry) bool { for _, entry := range wl { if !entry.PrefixMatchOnly && path == entry.Path { return true @@ -378,8 +378,8 @@ func IsInProvidedWhitelist(path string, wl []WhitelistEntry) bool { return false } -func CheckWhitelist(path string) bool { - for _, wl := range whitelist { +func CheckIgnoreList(path string) bool { + for _, wl := range ignorelist { if HasFilepathPrefix(path, wl.Path, wl.PrefixMatchOnly) { return true } @@ -388,21 +388,21 @@ func CheckWhitelist(path string) bool { return false } -func checkWhitelistRoot(root string) bool { +func checkIgnoreListRoot(root string) bool { if root == config.RootDir { return false 
} - return CheckWhitelist(root) + return CheckIgnoreList(root) } -// Get whitelist from roots of mounted files +// Get ignorelist from roots of mounted files // Each line of /proc/self/mountinfo is in the form: // 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue // (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) // Where (5) is the mount point relative to the process's root // From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func DetectFilesystemWhitelist(path string) error { - whitelist = initialWhitelist +func DetectFilesystemIgnoreList(path string) error { + ignorelist = initialIgnoreList volumes = []string{} f, err := os.Open(path) if err != nil { @@ -426,7 +426,7 @@ func DetectFilesystemWhitelist(path string) error { } if lineArr[4] != config.RootDir { logrus.Tracef("Appending %s from line: %s", lineArr[4], line) - whitelist = append(whitelist, WhitelistEntry{ + ignorelist = append(ignorelist, IgnoreListEntry{ Path: lineArr[4], PrefixMatchOnly: false, }) @@ -448,7 +448,7 @@ func RelativeFiles(fp string, root string) ([]string, error) { if err != nil { return err } - if CheckWhitelist(path) && !HasFilepathPrefix(path, root, false) { + if CheckIgnoreList(path) && !HasFilepathPrefix(path, root, false) { return nil } relPath, err := filepath.Rel(root, path) @@ -522,10 +522,10 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid return setFilePermissions(path, perm, int(uid), int(gid)) } -// AddVolumePath adds the given path to the volume whitelist. -func AddVolumePathToWhitelist(path string) { - logrus.Infof("adding volume %s to whitelist", path) - whitelist = append(whitelist, WhitelistEntry{ +// AddVolumePath adds the given path to the volume ignorelist. 
+func AddVolumePathToIgnoreList(path string) { + logrus.Infof("adding volume %s to ignorelist", path) + ignorelist = append(ignorelist, IgnoreListEntry{ Path: path, PrefixMatchOnly: true, }) @@ -861,13 +861,13 @@ func createParentDirectory(path string) error { return nil } -// UpdateInitialWhitelist will add /var/run to whitelisted paths if -func UpdateWhitelist(whitelistVarRun bool) { - if !whitelistVarRun { +// UpdateInitialIgnoreList will add /var/run to ignored paths if ignoreVarRun is true +func UpdateInitialIgnoreList(ignoreVarRun bool) { + if !ignoreVarRun { return } - logrus.Trace("Adding /var/run to initialWhitelist ") - initialWhitelist = append(initialWhitelist, WhitelistEntry{ + logrus.Trace("Adding /var/run to initialIgnoreList ") + initialIgnoreList = append(initialIgnoreList, IgnoreListEntry{ // /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar // which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist // in the image with no way to tell if it came from the base image or not. 
diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go index 2e0dcc8c5..df4958142 100644 --- a/pkg/util/fs_util_test.go +++ b/pkg/util/fs_util_test.go @@ -38,7 +38,7 @@ import ( "github.com/google/go-containerregistry/pkg/v1/types" ) -func Test_DetectFilesystemWhitelist(t *testing.T) { +func Test_DetectFilesystemSkiplist(t *testing.T) { testDir, err := ioutil.TempDir("", "") if err != nil { t.Fatalf("Error creating tempdir: %s", err) @@ -58,8 +58,8 @@ func Test_DetectFilesystemWhitelist(t *testing.T) { t.Fatalf("Error writing file contents to %s: %s", path, err) } - err = DetectFilesystemWhitelist(path) - expectedWhitelist := []WhitelistEntry{ + err = DetectFilesystemIgnoreList(path) + expectedSkiplist := []IgnoreListEntry{ {"/kaniko", false}, {"/proc", false}, {"/dev", false}, @@ -68,14 +68,14 @@ func Test_DetectFilesystemWhitelist(t *testing.T) { {"/etc/mtab", false}, {"/tmp/apt-key-gpghome", true}, } - actualWhitelist := whitelist - sort.Slice(actualWhitelist, func(i, j int) bool { - return actualWhitelist[i].Path < actualWhitelist[j].Path + actualSkiplist := ignorelist + sort.Slice(actualSkiplist, func(i, j int) bool { + return actualSkiplist[i].Path < actualSkiplist[j].Path }) - sort.Slice(expectedWhitelist, func(i, j int) bool { - return expectedWhitelist[i].Path < expectedWhitelist[j].Path + sort.Slice(expectedSkiplist, func(i, j int) bool { + return expectedSkiplist[i].Path < expectedSkiplist[j].Path }) - testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist) + testutil.CheckErrorAndDeepEqual(t, false, err, expectedSkiplist, actualSkiplist) } var tests = []struct { @@ -251,10 +251,10 @@ func Test_ParentDirectoriesWithoutLeadingSlash(t *testing.T) { } } -func Test_CheckWhitelist(t *testing.T) { +func Test_CheckIgnoreList(t *testing.T) { type args struct { - path string - whitelist []WhitelistEntry + path string + ignorelist []IgnoreListEntry } tests := []struct { name string @@ -262,56 +262,56 @@ func 
Test_CheckWhitelist(t *testing.T) { want bool }{ { - name: "file whitelisted", + name: "file ignored", args: args{ - path: "/foo", - whitelist: []WhitelistEntry{{"/foo", false}}, + path: "/foo", + ignorelist: []IgnoreListEntry{{"/foo", false}}, }, want: true, }, { - name: "directory whitelisted", + name: "directory ignored", args: args{ - path: "/foo/bar", - whitelist: []WhitelistEntry{{"/foo", false}}, + path: "/foo/bar", + ignorelist: []IgnoreListEntry{{"/foo", false}}, }, want: true, }, { - name: "grandparent whitelisted", + name: "grandparent ignored", args: args{ - path: "/foo/bar/baz", - whitelist: []WhitelistEntry{{"/foo", false}}, + path: "/foo/bar/baz", + ignorelist: []IgnoreListEntry{{"/foo", false}}, }, want: true, }, { - name: "sibling whitelisted", + name: "sibling ignored", args: args{ - path: "/foo/bar/baz", - whitelist: []WhitelistEntry{{"/foo/bat", false}}, + path: "/foo/bar/baz", + ignorelist: []IgnoreListEntry{{"/foo/bat", false}}, }, want: false, }, { name: "prefix match only ", args: args{ - path: "/tmp/apt-key-gpghome.xft/gpg.key", - whitelist: []WhitelistEntry{{"/tmp/apt-key-gpghome.*", true}}, + path: "/tmp/apt-key-gpghome.xft/gpg.key", + ignorelist: []IgnoreListEntry{{"/tmp/apt-key-gpghome.*", true}}, }, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - original := whitelist + original := ignorelist defer func() { - whitelist = original + ignorelist = original }() - whitelist = tt.args.whitelist - got := CheckWhitelist(tt.args.path) + ignorelist = tt.args.ignorelist + got := CheckIgnoreList(tt.args.path) if got != tt.want { - t.Errorf("CheckWhitelist() = %v, want %v", got, tt.want) + t.Errorf("CheckIgnoreList() = %v, want %v", got, tt.want) } }) } @@ -879,10 +879,10 @@ func TestCopySymlink(t *testing.T) { } } -func Test_childDirInWhitelist(t *testing.T) { +func Test_childDirInSkiplist(t *testing.T) { type args struct { - path string - whitelist []WhitelistEntry + path string + ignorelist []IgnoreListEntry } 
tests := []struct { name string @@ -890,17 +890,17 @@ func Test_childDirInWhitelist(t *testing.T) { want bool }{ { - name: "not in whitelist", + name: "not in ignorelist", args: args{ path: "/foo", }, want: false, }, { - name: "child in whitelist", + name: "child in ignorelist", args: args{ path: "/foo", - whitelist: []WhitelistEntry{ + ignorelist: []IgnoreListEntry{ { Path: "/foo/bar", }, @@ -909,16 +909,16 @@ func Test_childDirInWhitelist(t *testing.T) { want: true, }, } - oldWhitelist := whitelist + oldIgnoreList := ignorelist defer func() { - whitelist = oldWhitelist + ignorelist = oldIgnoreList }() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - whitelist = tt.args.whitelist - if got := childDirInWhitelist(tt.args.path); got != tt.want { - t.Errorf("childDirInWhitelist() = %v, want %v", got, tt.want) + ignorelist = tt.args.ignorelist + if got := childDirInIgnoreList(tt.args.path); got != tt.want { + t.Errorf("childDirInIgnoreList() = %v, want %v", got, tt.want) } }) } @@ -1315,16 +1315,16 @@ func assertGetFSFromLayers( } } -func TestUpdateWhitelist(t *testing.T) { +func TestUpdateSkiplist(t *testing.T) { tests := []struct { - name string - whitelistVarRun bool - expected []WhitelistEntry + name string + skipVarRun bool + expected []IgnoreListEntry }{ { - name: "var/run whitelisted", - whitelistVarRun: true, - expected: []WhitelistEntry{ + name: "var/run ignored", + skipVarRun: true, + expected: []IgnoreListEntry{ { Path: "/kaniko", PrefixMatchOnly: false, @@ -1344,8 +1344,8 @@ func TestUpdateWhitelist(t *testing.T) { }, }, { - name: "var/run not whitelisted", - expected: []WhitelistEntry{ + name: "var/run not ignored", + expected: []IgnoreListEntry{ { Path: "/kaniko", PrefixMatchOnly: false, @@ -1363,16 +1363,16 @@ func TestUpdateWhitelist(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - original := initialWhitelist - defer func() { initialWhitelist = original }() - UpdateWhitelist(tt.whitelistVarRun) + 
original := initialIgnoreList + defer func() { initialIgnoreList = original }() + UpdateInitialIgnoreList(tt.skipVarRun) sort.Slice(tt.expected, func(i, j int) bool { return tt.expected[i].Path < tt.expected[j].Path }) - sort.Slice(initialWhitelist, func(i, j int) bool { - return initialWhitelist[i].Path < initialWhitelist[j].Path + sort.Slice(initialIgnoreList, func(i, j int) bool { + return initialIgnoreList[i].Path < initialIgnoreList[j].Path }) - testutil.CheckDeepEqual(t, tt.expected, initialWhitelist) + testutil.CheckDeepEqual(t, tt.expected, initialIgnoreList) }) } } From 994a412d0b761c92812a23dac1898eec2f1eb8a1 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Tue, 2 Jun 2020 16:08:46 -0700 Subject: [PATCH 13/26] some more renames --- cmd/executor/cmd/root.go | 6 +++--- deploy/Dockerfile_debug | 2 +- integration/integration_test.go | 6 +++--- pkg/config/options.go | 2 +- pkg/util/fs_util.go | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index a31cd0168..77afc92dd 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -83,8 +83,8 @@ var RootCmd = &cobra.Command{ if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" { return errors.New("You must provide --destination if setting ImageNameDigestFile") } - // Update skipped paths - util.UpdateInitialIgnoreList(opts.SkipVarRun) + // Update ignored paths + util.UpdateInitialIgnoreList(opts.IgnoreVarRun) } return nil }, @@ -160,7 +160,7 @@ func addKanikoOptionsFlags() { opts.RegistriesCertificates = make(map[string]string) RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. 
Expected format is 'my.registry.url=/path/to/the/server/certificate'.") RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use has pull-through cache instead of docker.io.") - RootCmd.PersistentFlags().BoolVarP(&opts.SkipVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).") + RootCmd.PersistentFlags().BoolVarP(&opts.IgnoreVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).") RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. Set it repeatedly for multiple labels.") RootCmd.PersistentFlags().BoolVarP(&opts.SkipUnusedStages, "skip-unused-stages", "", false, "Build only used stages if defined to true. Otherwise it builds by default all stages, even the unnecessaries ones until it reaches the target stage / end of Dockerfile") } diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 66ca8d4b5..02da41550 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -46,7 +46,7 @@ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/ -# Declare /busybox as a volume to get it automatically in the path to skip +# Declare /busybox as a volume to get it automatically in the path to ignore VOLUME /busybox COPY files/ca-certificates.crt /kaniko/ssl/certs/ COPY --from=0 /kaniko/.docker /kaniko/.docker diff --git a/integration/integration_test.go b/integration/integration_test.go index 
979274eb1..73881fc69 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -588,14 +588,14 @@ func filterMetaDiff(metaDiff []string) []string { func filterFileDiff(f []fileDiff) []fileDiff { var newDiffs []fileDiff for _, diff := range f { - isSkipped := false + isIgnored := false for _, p := range allowedDiffPaths { if util.HasFilepathPrefix(diff.Name, p, false) { - isSkipped = true + isIgnored = true break } } - if !isSkipped { + if !isIgnored { newDiffs = append(newDiffs, diff) } } diff --git a/pkg/config/options.go b/pkg/config/options.go index 7732f206f..576d42f09 100644 --- a/pkg/config/options.go +++ b/pkg/config/options.go @@ -55,7 +55,7 @@ type KanikoOptions struct { NoPush bool Cache bool Cleanup bool - SkipVarRun bool + IgnoreVarRun bool SkipUnusedStages bool } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index e390c7e3a..d957e64be 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -55,13 +55,13 @@ var initialIgnoreList = []IgnoreListEntry{ PrefixMatchOnly: false, }, { - // similarly, we skip /etc/mtab, since there is no way to know if the file was mounted or came + // similarly, we ignore /etc/mtab, since there is no way to know if the file was mounted or came // from the base image Path: "/etc/mtab", PrefixMatchOnly: false, }, { - // we skip /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory. + // we ignore /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory. 
// from the base image Path: "/tmp/apt-key-gpghome", PrefixMatchOnly: true, From c0f699d3f15074ceabc25907ccc1a6819d15a7aa Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 11:59:41 -0700 Subject: [PATCH 14/26] removed ununsed stage 1 --- deploy/Dockerfile_debug | 7 ------- 1 file changed, 7 deletions(-) diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index fa2952dbf..1e8374f82 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -33,13 +33,6 @@ RUN mkdir -p /kaniko/.docker COPY . . RUN make GOARCH=${GOARCH} && make out/warmer -# Stage 1: Get the busybox shell -FROM gcr.io/cloud-builders/bazel:latest -RUN git clone https://github.com/GoogleContainerTools/distroless.git -WORKDIR /distroless -RUN bazel build //experimental/busybox:busybox_tar -RUN tar -C /distroless/bazel-bin/experimental/busybox/ -xf /distroless/bazel-bin/experimental/busybox/busybox.tar - FROM scratch COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/* /kaniko/ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr From 2214da37e282a4ff81c7e7106576953372c7797b Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 12:11:15 -0700 Subject: [PATCH 15/26] Update Dockerfile_debug --- deploy/Dockerfile_debug | 1 - 1 file changed, 1 deletion(-) diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 1e8374f82..88a248de2 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -38,7 +38,6 @@ COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/* /kaniko/ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr -COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/ COPY --from=amd64/busybox:1.31.1 
/bin/busybox /busybox/busybox # Declare /busybox as a volume to get it automatically in the path to ignore From 8a585829b8e737f72ea2e902fb1932dff45b4318 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 12:28:51 -0700 Subject: [PATCH 16/26] Update pkg/dockerfile/dockerfile_test.go --- pkg/dockerfile/dockerfile_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go index 39204139b..31a965241 100644 --- a/pkg/dockerfile/dockerfile_test.go +++ b/pkg/dockerfile/dockerfile_test.go @@ -69,7 +69,7 @@ func Test_ParseStages_ArgValueWithQuotes(t *testing.T) { } if len(metaArgs) != 5 { - t.Fatalf("length of stage meta args expected to be 2, but was %d", len(metaArgs)) + t.Fatalf("length of stage meta args expected to be 5, but was %d", len(metaArgs)) } for i, expectedVal := range []string{"ubuntu:16.04", "bar", "Hello", "World", "Hello World"} { From 9ec838bd9b30fd040b1e1f9443fdc0147f548513 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 12:53:40 -0700 Subject: [PATCH 17/26] copy all files from busybox image --- deploy/Dockerfile_debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug index 88a248de2..0d1726f40 100644 --- a/deploy/Dockerfile_debug +++ b/deploy/Dockerfile_debug @@ -38,7 +38,7 @@ COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/* /kaniko/ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr -COPY --from=amd64/busybox:1.31.1 /bin/busybox /busybox/busybox +COPY --from=amd64/busybox:1.31.1 /bin /busybox # Declare /busybox as a volume to get it automatically in the path to ignore VOLUME /busybox From 
2095c9d68f2f92eb76693072fe53aadd99a88746 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 13:55:25 -0700 Subject: [PATCH 18/26] prepare for release --- CHANGELOG.md | 75 ++++++++++++++++++++++++++++++++++++++++++++++++++++ Makefile | 2 +- 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 366d953f7..b97a00174 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,78 @@ +# v0.23.0 Release 2020-06-04 +This is the 23rd release of Kaniko! + +# Bug Fixes +* Resolving nested meta ARGs [#1260](https://github.com/GoogleContainerTools/kaniko/pull/1260) +* add 64 busybox [#1254](https://github.com/GoogleContainerTools/kaniko/pull/1254) +* Apply dockefile exclude only for first stage [#1234](https://github.com/GoogleContainerTools/kaniko/pull/1234) + +# New Features +* Add /etc/nsswitch.conf for /etc/hosts name resolution [#1251](https://github.com/GoogleContainerTools/kaniko/pull/1251) +* Add ability to set git auth token using environment variables [#1263](https://github.com/GoogleContainerTools/kaniko/pull/1263) +* Add retries to image push. 
[#1258](https://github.com/GoogleContainerTools/kaniko/pull/1258) +* Update docker-credential-gcr to support auth with GCP Artifact Registry [#1255](https://github.com/GoogleContainerTools/kaniko/pull/1255) + +# Updates and Refactors +* Added integration test for multi level argument [#1285](https://github.com/GoogleContainerTools/kaniko/pull/1285) +* rename whitelist to ignorelist [#1295](https://github.com/GoogleContainerTools/kaniko/pull/1295) +* Remove direct use of DefaultTransport [#1221](https://github.com/GoogleContainerTools/kaniko/pull/1221) +* fix switching to non existent workdir [#1253](https://github.com/GoogleContainerTools/kaniko/pull/1253) +* remove duplicates save for the same dir [#1252](https://github.com/GoogleContainerTools/kaniko/pull/1252) +* add timings for resolving paths [#1284](https://github.com/GoogleContainerTools/kaniko/pull/1284) + +# Documentation +* Instructions for using stdin with kubectl [#1289](https://github.com/GoogleContainerTools/kaniko/pull/1289) +* Add GoReportCard badge to README [#1249](https://github.com/GoogleContainerTools/kaniko/pull/1249) +* Make support clause more bold. 
[#1273](https://github.com/GoogleContainerTools/kaniko/pull/1273) +* Correct typo [#1250](https://github.com/GoogleContainerTools/kaniko/pull/1250) +* docs: add registry-certificate flag to readme [#1276](https://github.com/GoogleContainerTools/kaniko/pull/1276) + +Huge thank you for this release towards our contributors: +- Anthony Davies +- Art Begolli +- Batuhan Apaydın +- Ben Einaudi +- Carlos Alexandro Becker +- Carlos Sanchez +- Chris Sng +- Cole Wippern +- Dani Raznikov +- Daniel Marks +- David Dooling +- DracoBlue +- Gabriel Virga +- Gilbert Gilb's +- Giovan Isa Musthofa +- Gábor Lipták +- James Ravn +- Jon Henrik Bjørnstad +- Jordan GOASDOUE +- Liubov Grinkevich +- Logan.Price +- Lukasz Jakimczuk +- Mehdi Abaakouk +- Michel Hollands +- Mitchell Friedman +- Moritz Wanzenböck +- Or Sela +- PhoenixMage +- Sam Stoelinga +- Tejal Desai +- Thomas Bonfort +- Thomas Stromberg +- Thomas Strömberg +- Tom Prince +- Vincent Latombe +- Wietse Muizelaar +- Yoan Blanc +- Yoriyasu Yano +- Yuheng Zhang +- cvgw +- ohchang-kwon +- tinkerborg +- xanonid +- yw-liu + # v0.22.0 Release 2020-05-07 This is a minor release of kaniko fixing: - GCB Authentication issue diff --git a/Makefile b/Makefile index cc372b64d..0966fe693 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ # Bump these on release VERSION_MAJOR ?= 0 -VERSION_MINOR ?= 22 +VERSION_MINOR ?= 23 VERSION_BUILD ?= 0 VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) From 85c40c45b3b49b8d85da969a3854f8dca9b5784a Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 15:10:01 -0700 Subject: [PATCH 19/26] fix dockerfile --- integration/images.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/images.go b/integration/images.go index 97f3b6fec..8865e836a 100644 --- a/integration/images.go +++ b/integration/images.go @@ -430,7 +430,7 @@ func buildKanikoImage( kanikoDockerfilePath := path.Join(buildContextPath, dockerfilesPath, dockerfile) if dockerfilesPath == "" { - 
kanikoDockerfilePath = path.Join(buildContextPath, dockerfile) + kanikoDockerfilePath = path.Join(buildContextPath, "Dockerfile") } dockerRunFlags = append(dockerRunFlags, ExecutorImage, From 18dbb0e12aae99ee73832691445a9676ba261d69 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 15:11:24 -0700 Subject: [PATCH 20/26] add biolerplate --- integration/benchmark_fs/make.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/integration/benchmark_fs/make.sh b/integration/benchmark_fs/make.sh index 525f84e94..96a5f36b4 100755 --- a/integration/benchmark_fs/make.sh +++ b/integration/benchmark_fs/make.sh @@ -1,5 +1,19 @@ #!/usr/local/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + mkdir /workdir i=1 From 92a5c89b97d5fe15be3af0fc54cc9b7b4c141694 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 4 Jun 2020 15:12:57 -0700 Subject: [PATCH 21/26] update year in license. --- integration/benchmark_fs/make.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration/benchmark_fs/make.sh b/integration/benchmark_fs/make.sh index 96a5f36b4..828f420ba 100755 --- a/integration/benchmark_fs/make.sh +++ b/integration/benchmark_fs/make.sh @@ -1,6 +1,6 @@ #!/usr/local/bin/bash -# Copyright 2018 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 6ab97e2b5f07319a856a5fd1f9d10ff60ba14878 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 5 Jun 2020 12:46:47 -0700 Subject: [PATCH 22/26] ADD GCB benchmark code --- cmd/executor/cmd/root.go | 23 ++++-- .../{Dockerfile_fs_benchmark => Dockerfile} | 0 integration/benchmark_fs/cloudbuild.yaml | 12 ++++ integration/benchmark_fs/make.sh | 3 +- integration/benchmark_test.go | 72 ++++++++++++++++++- pkg/buildcontext/gcs.go | 21 ++++++ pkg/filesystem/resolve.go | 1 - 7 files changed, 122 insertions(+), 10 deletions(-) rename integration/benchmark_fs/{Dockerfile_fs_benchmark => Dockerfile} (100%) create mode 100644 integration/benchmark_fs/cloudbuild.yaml diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go index 77afc92dd..8b60da706 100644 --- a/cmd/executor/cmd/root.go +++ b/cmd/executor/cmd/root.go @@ -115,16 +115,27 @@ var RootCmd = &cobra.Command{ benchmarkFile := os.Getenv("BENCHMARK_FILE") // false is a keyword for integration tests to turn off benchmarking if benchmarkFile != "" && benchmarkFile != "false" { - f, err := os.Create(benchmarkFile) - if err != nil { - logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err) - } - defer f.Close() s, err := timing.JSON() if err != nil { logrus.Warnf("Unable to write benchmark file: %s", err) + return + } + if strings.HasPrefix(benchmarkFile, "gs://") { + logrus.Info("uploading to gcs") + if err := buildcontext.UploadToBucket(strings.NewReader(s), benchmarkFile); err != nil { + logrus.Infof("Unable to upload %s due to %v", benchmarkFile, err) + } + logrus.Infof("benchmark file written at %s", benchmarkFile) + } else { + f, err := os.Create(benchmarkFile) + if err != nil { + logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err) + return + } + defer f.Close() + f.WriteString(s) + logrus.Infof("benchmark file written at %s", benchmarkFile) } - f.WriteString(s) } }, } diff --git a/integration/benchmark_fs/Dockerfile_fs_benchmark 
b/integration/benchmark_fs/Dockerfile similarity index 100% rename from integration/benchmark_fs/Dockerfile_fs_benchmark rename to integration/benchmark_fs/Dockerfile diff --git a/integration/benchmark_fs/cloudbuild.yaml b/integration/benchmark_fs/cloudbuild.yaml new file mode 100644 index 000000000..9f2e7204e --- /dev/null +++ b/integration/benchmark_fs/cloudbuild.yaml @@ -0,0 +1,12 @@ +steps: +- name: 'gcr.io/kaniko-project/executor:perf-latest' + args: + - --build-arg=NUM=${_COUNT} + - --no-push + - --snapshotMode=redo + env: + - 'BENCHMARK_FILE=gs://tejal-test/redo_gcb/benchmark_file_${_COUNT}' + timeout: 2400s +timeout: 2400s +substitutions: + _COUNT: "10000" # default value \ No newline at end of file diff --git a/integration/benchmark_fs/make.sh b/integration/benchmark_fs/make.sh index 828f420ba..e336af448 100755 --- a/integration/benchmark_fs/make.sh +++ b/integration/benchmark_fs/make.sh @@ -17,7 +17,8 @@ mkdir /workdir i=1 -while [ $i -le $1 ] +targetCnt=$(( $1 + 0 )) +while [ $i -le $targetCnt ] do cat context.txt > /workdir/somefile$i i=$(( $i + 1 )) diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 036e96beb..fe3286e35 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "os/exec" "path/filepath" "strconv" "sync" @@ -32,6 +33,7 @@ type result struct { totalBuildTime float64 resolvingFiles float64 walkingFiles float64 + hashingFiles float64 } func TestSnapshotBenchmark(t *testing.T) { @@ -44,7 +46,7 @@ func TestSnapshotBenchmark(t *testing.T) { } contextDir := filepath.Join(cwd, "benchmark_fs") - nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000, 800000} + nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000} var timeMap sync.Map var wg sync.WaitGroup @@ -53,7 +55,7 @@ func TestSnapshotBenchmark(t *testing.T) { wg.Add(1) var err error go func(num int, err *error) { - dockerfile := "Dockerfile_fs_benchmark" + 
dockerfile := "Dockerfile" kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num) buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} var benchmarkDir string @@ -106,5 +108,71 @@ func newResult(t *testing.T, f string) result { if c, ok := current["Total Build Time"]; ok { r.totalBuildTime = c.Seconds() } + if c, ok := current["Hashing files"]; ok { + r.hashingFiles = c.Seconds() + } + fmt.Println(r) return r } + +func TestSnapshotBenchmarkGcloud(t *testing.T) { + if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err != nil || !b { + t.SkipNow() + } + cwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + contextDir := filepath.Join(cwd, "benchmark_fs") + + nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000} + + var wg sync.WaitGroup + fmt.Println("Number of Files,Total Build Time,Walking Filesystem, Resolving Files") + for _, num := range nums { + t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) { + wg.Add(1) + var err error + go func(num int, err error) { + dir, err := runInGcloud(contextDir, num) + if err != nil { + return + } + r := newResult(t, filepath.Join(dir, "results")) + fmt.Println(fmt.Sprintf("%d,%f,%f,%f, %f", num, r.totalBuildTime, r.walkingFiles, r.resolvingFiles, r.hashingFiles)) + wg.Done() + defer os.Remove(dir) + defer os.Chdir(cwd) + }(num, err) + if err != nil { + t.Errorf("could not run benchmark results for num %d due to %s", num, err) + } + }) + } + wg.Wait() +} + +func runInGcloud(dir string, num int) (string, error) { + os.Chdir(dir) + cmd := exec.Command("gcloud", "builds", + "submit", "--config=cloudbuild.yaml", + fmt.Sprintf("--substitutions=_COUNT=%d", num)) + _, err := RunCommandWithoutTest(cmd) + if err != nil { + return "", err + } + + // grab gcs and to temp dir and return + tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%d", num)) + if err != nil { + return "", err + } + src := fmt.Sprintf("gs://tejal-test/redo_gcb/benchmark_file_%d", num) + 
dest := filepath.Join(tmpDir, "results") + copyCommand := exec.Command("gsutil", "cp", src, dest) + _, err = RunCommandWithoutTest(copyCommand) + if err != nil { + return "", fmt.Errorf("failed to download file to GCS bucket %s: %s", src, err) + } + return tmpDir, nil +} diff --git a/pkg/buildcontext/gcs.go b/pkg/buildcontext/gcs.go index 920028b62..f4e98920d 100644 --- a/pkg/buildcontext/gcs.go +++ b/pkg/buildcontext/gcs.go @@ -17,8 +17,10 @@ limitations under the License. package buildcontext import ( + "io" "os" "path/filepath" + "strings" "cloud.google.com/go/storage" "github.com/GoogleContainerTools/kaniko/pkg/constants" @@ -37,6 +39,25 @@ func (g *GCS) UnpackTarFromBuildContext() (string, error) { return constants.BuildContextDir, unpackTarFromGCSBucket(bucket, item, constants.BuildContextDir) } +func UploadToBucket(r io.Reader, dest string) error { + ctx := context.Background() + context := strings.SplitAfter(dest, "://")[1] + bucketName, item := util.GetBucketAndItem(context) + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + bucket := client.Bucket(bucketName) + w := bucket.Object(item).NewWriter(ctx) + if _, err := io.Copy(w, r); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + return nil +} + // unpackTarFromGCSBucket unpacks the context.tar.gz file in the given bucket to the given directory func unpackTarFromGCSBucket(bucketName, item, directory string) error { // Get the tar from the bucket diff --git a/pkg/filesystem/resolve.go b/pkg/filesystem/resolve.go index 64121146a..30a79dc05 100644 --- a/pkg/filesystem/resolve.go +++ b/pkg/filesystem/resolve.go @@ -35,7 +35,6 @@ import ( // output set. // * Add all ancestors of each path to the output set. 
func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) { - logrus.Infof("Resolving %d paths", len(paths)) logrus.Tracef("Resolving paths %s", paths) fileSet := make(map[string]bool) From 66c82666279e34eaa6b23b8c99873ff87b7c62d5 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 5 Jun 2020 13:58:30 -0700 Subject: [PATCH 23/26] remove private buckets --- integration/benchmark_fs/cloudbuild.yaml | 2 +- integration/benchmark_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/benchmark_fs/cloudbuild.yaml b/integration/benchmark_fs/cloudbuild.yaml index 9f2e7204e..01a83440b 100644 --- a/integration/benchmark_fs/cloudbuild.yaml +++ b/integration/benchmark_fs/cloudbuild.yaml @@ -5,7 +5,7 @@ steps: - --no-push - --snapshotMode=redo env: - - 'BENCHMARK_FILE=gs://tejal-test/redo_gcb/benchmark_file_${_COUNT}' + - 'BENCHMARK_FILE=gs://$PROJECT_ID/gcb/benchmark_file_${_COUNT}' timeout: 2400s timeout: 2400s substitutions: diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index fe3286e35..8503315a1 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -167,7 +167,7 @@ func runInGcloud(dir string, num int) (string, error) { if err != nil { return "", err } - src := fmt.Sprintf("gs://tejal-test/redo_gcb/benchmark_file_%d", num) + src := fmt.Sprintf("%s/gcb/benchmark_file_%d", config.gcsBucket, num) dest := filepath.Join(tmpDir, "results") copyCommand := exec.Command("gsutil", "cp", src, dest) _, err = RunCommandWithoutTest(copyCommand) From 95a8ecc200385f77b1fd11ba59af482c4c2318b8 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 5 Jun 2020 14:58:36 -0700 Subject: [PATCH 24/26] fix lint --- integration/benchmark_fs/Dockerfile | 13 +++++++++++++ integration/benchmark_test.go | 3 ++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/integration/benchmark_fs/Dockerfile b/integration/benchmark_fs/Dockerfile index 4065467c4..9a70889f1 100644 
--- a/integration/benchmark_fs/Dockerfile +++ b/integration/benchmark_fs/Dockerfile @@ -1,3 +1,16 @@ +# Copyright 2020 Google, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. FROM bash:4.4 ARG NUM diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 8503315a1..7c09f6438 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -134,7 +134,8 @@ func TestSnapshotBenchmarkGcloud(t *testing.T) { wg.Add(1) var err error go func(num int, err error) { - dir, err := runInGcloud(contextDir, num) + var dir string + dir, err = runInGcloud(contextDir, num) if err != nil { return } From ba00c9fb7d373b386742341b25a4a246fcb6433b Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 5 Jun 2020 15:10:28 -0700 Subject: [PATCH 25/26] add benchmarking for gcb --- DEVELOPMENT.md | 15 +++++++++++++++ integration/benchmark_fs/cloudbuild.yaml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 18b69b3e3..330d9ba56 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -166,6 +166,21 @@ Additionally, the integration tests can output benchmarking information to a `be BENCHMARK=true go test -v --bucket $GCS_BUCKET --repo $IMAGE_REPO ``` +#### Benchmarking your GCB runs +If you are GCB builds are slow, you can check which phases in kaniko are bottlenecks or taking more time. +To do this, add "BENCHMARK_ENV" to your cloudbuild.yaml like this. 
+```shell script +steps: +- name: 'gcr.io/kaniko-project/executor:latest' + args: + - --build-arg=NUM=${_COUNT} + - --no-push + - --snapshotMode=redo + env: + - 'BENCHMARK_FILE=gs://$PROJECT_ID/gcb/benchmark_file' +``` +You can download the file `gs://$PROJECT_ID/gcb/benchmark_file` using `gsutil cp` command. + ## Creating a PR When you have changes you would like to propose to kaniko, you will need to: diff --git a/integration/benchmark_fs/cloudbuild.yaml b/integration/benchmark_fs/cloudbuild.yaml index 01a83440b..829fb946b 100644 --- a/integration/benchmark_fs/cloudbuild.yaml +++ b/integration/benchmark_fs/cloudbuild.yaml @@ -1,5 +1,5 @@ steps: -- name: 'gcr.io/kaniko-project/executor:perf-latest' +- name: 'gcr.io/kaniko-project/executor:latest' args: - --build-arg=NUM=${_COUNT} - --no-push From 57818cfb7997fea58a7c4c502116326f90c50146 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Fri, 5 Jun 2020 16:07:32 -0700 Subject: [PATCH 26/26] lint --- integration/benchmark_test.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 7c09f6438..f7364114d 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -132,11 +132,10 @@ func TestSnapshotBenchmarkGcloud(t *testing.T) { for _, num := range nums { t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) { wg.Add(1) - var err error - go func(num int, err error) { - var dir string - dir, err = runInGcloud(contextDir, num) + go func(num int) { + dir, err := runInGcloud(contextDir, num) if err != nil { + t.Errorf("error when running in gcloud %v", err) return } r := newResult(t, filepath.Join(dir, "results")) @@ -144,10 +143,7 @@ func TestSnapshotBenchmarkGcloud(t *testing.T) { wg.Done() defer os.Remove(dir) defer os.Chdir(cwd) - }(num, err) - if err != nil { - t.Errorf("could not run benchmark results for num %d due to %s", num, err) - } + }(num) }) } wg.Wait()