fix: Refactor `LayersMap` to correct old strange code behavior (#2066)

* fix: Correct flatten function in layers

- Added a test.
- Cache current image, track deletes in `whiteouts` as well as normal adds in `layers`.
- Fix ugly delete behavior of `layerHashCache`.
  Delete it when creating a new snapshot.
- Slight cleanup in `snapshot.go`.
- Format ugly `WalkFS` function.

* fix: Add symbolic link changes to Hasher and CacheHasher

* fix: Better log messages

* fix(ci): Integration tests

* fix(ci): Add `--no-cache` to docker builds

* fix(ci): Pass credentials for error integration test

* np: Missing .gitignore in `hack`

* np: Capitalize every log message

- Correct some linting.

* fix: Key function

- Merge only last layer onto `currentImage`.

* fix: Remove old obsolete `cacheHasher`
Gabriel Nützi 2022-05-18 02:30:58 +02:00 committed by GitHub
parent 28432d3c84
commit 323e616a67
52 changed files with 475 additions and 275 deletions
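
In short, the refactor replaces the flattened `layers`/`whiteouts` bookkeeping with explicit per-layer adds and deletes plus a cached, merged view of the filesystem. A minimal sketch of that idea (simplified, hypothetical names; the real implementation is in the `pkg/snapshot/layered_map.go` diff below):

```go
// Sketch only: per-layer adds/deletes with a cached merged view,
// mirroring the fields this commit introduces on LayeredMap.
type layeredMapSketch struct {
	adds         []map[string]string   // per layer: path -> hash of added file
	deletes      []map[string]struct{} // per layer: deleted paths (whiteouts)
	currentImage map[string]string     // merged view up to the last layer
}

// snapshot folds the finished top layer into currentImage and opens a
// fresh empty layer, so lookups only ever consult one flat map.
func (l *layeredMapSketch) snapshot() {
	if l.currentImage == nil {
		l.currentImage = map[string]string{}
	}
	if n := len(l.adds); n > 0 {
		for path, hash := range l.adds[n-1] {
			l.currentImage[path] = hash
		}
		for path := range l.deletes[n-1] {
			delete(l.currentImage, path)
		}
	}
	l.adds = append(l.adds, map[string]string{})
	l.deletes = append(l.deletes, map[string]struct{}{})
}
```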

View File

@ -74,7 +74,7 @@ The helper script to install and run lint is placed here at the root of project.
```shell
./hack/linter.sh
```
To fix any `gofmt` issues, you can simply run `gofmt` with the `-w` flag, like this
@ -99,6 +99,8 @@ To run integration tests with your GCloud Storage, you will also need the follow
* A bucket in [GCS](https://cloud.google.com/storage/) which you have write access to via
the user currently logged into `gcloud`
* An image repo which you have write access to via the user currently logged into `gcloud`
* A docker account and a `~/.docker/config.json` with login credentials if you run
into rate limiting problems during tests.
Once this step is done, you must override the project using environment variables:
@ -162,9 +164,9 @@ These tests will be kicked off by [reviewers](#reviews) for submitted PRs using
### Benchmarking
The goal is for Kaniko to be at least as fast at building Dockerfiles as Docker is, and to that end, we've built
in benchmarking to check the speed of not only each full run, but also how long each step of each run takes. To turn
on benchmarking, just set the `BENCHMARK_FILE` environment variable, and kaniko will output all the benchmark info
of each run to that file location.
```shell
@ -174,7 +176,7 @@ gcr.io/kaniko-project/executor:latest \
--dockerfile=<path to Dockerfile> --context=/workspace \
--destination=gcr.io/my-repo/my-image
```
Additionally, the integration tests can output benchmarking information to a `benchmarks` directory under the
`integration` directory if the `BENCHMARK` environment variable is set to `true`.
```shell

View File

@ -128,7 +128,7 @@ var RootCmd = &cobra.Command{
if !force {
exit(errors.New("kaniko should only be run inside of a container, run with the --force flag if you are sure you want to continue"))
}
logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
logrus.Warn("Kaniko is being run outside of a container. This can have dangerous effects on your system")
}
if !opts.NoPush || opts.CacheRepo != "" {
if err := executor.CheckPushPermissions(opts); err != nil {
@ -158,11 +158,11 @@ var RootCmd = &cobra.Command{
return
}
if strings.HasPrefix(benchmarkFile, "gs://") {
logrus.Info("uploading to gcs")
logrus.Info("Uploading to gcs")
if err := buildcontext.UploadToBucket(strings.NewReader(s), benchmarkFile); err != nil {
logrus.Infof("Unable to upload %s due to %v", benchmarkFile, err)
}
logrus.Infof("benchmark file written at %s", benchmarkFile)
logrus.Infof("Benchmark file written at %s", benchmarkFile)
} else {
f, err := os.Create(benchmarkFile)
if err != nil {
@ -171,7 +171,7 @@ var RootCmd = &cobra.Command{
}
defer f.Close()
f.WriteString(s)
logrus.Infof("benchmark file written at %s", benchmarkFile)
logrus.Infof("Benchmark file written at %s", benchmarkFile)
}
}
},

1
hack/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
bin

2
integration/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
cache
config.json

View File

@ -169,7 +169,7 @@ func runInGcloud(dir string, num int) (string, error) {
copyCommand := exec.Command("gsutil", "cp", src, dest)
_, err = RunCommandWithoutTest(copyCommand)
if err != nil {
return "", fmt.Errorf("failed to download file to GCS bucket %s: %s", src, err)
return "", fmt.Errorf("failed to download file to GCS bucket %s: %w", src, err)
}
return tmpDir, nil
}

View File

@ -31,17 +31,17 @@ func CreateIntegrationTarball() (string, error) {
log.Println("Creating tarball of integration test files to use as build context")
dir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("Failed find path to integration dir: %s", err)
return "", fmt.Errorf("Failed find path to integration dir: %w", err)
}
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %s", err)
return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err)
}
contextFile := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano())
cmd := exec.Command("tar", "-C", dir, "-zcvf", contextFile, ".")
_, err = RunCommandWithoutTest(cmd)
if err != nil {
return "", fmt.Errorf("Failed to create build context tarball from integration dir: %s", err)
return "", fmt.Errorf("Failed to create build context tarball from integration dir: %w", err)
}
return contextFile, err
}
@ -57,7 +57,7 @@ func UploadFileToBucket(gcsBucket string, filePath string, gcsPath string) (stri
if err != nil {
log.Printf("Error uploading file %s to GCS at %s: %s", filePath, dst, err)
log.Println(string(out))
return "", fmt.Errorf("Failed to copy tarball to GCS bucket %s: %s", gcsBucket, err)
return "", fmt.Errorf("Failed to copy tarball to GCS bucket %s: %w", gcsBucket, err)
}
return dst, nil
@ -69,7 +69,7 @@ func DeleteFromBucket(path string) error {
cmd := exec.Command("gsutil", "rm", path)
_, err := RunCommandWithoutTest(cmd)
if err != nil {
return fmt.Errorf("Failed to delete file %s from GCS: %s", path, err)
return fmt.Errorf("Failed to delete file %s from GCS: %w", path, err)
}
return err
}

View File

@ -31,6 +31,7 @@ import (
"time"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
)
const (
@ -82,7 +83,7 @@ var additionalKanikoFlagsMap = map[string][]string{
"Dockerfile_test_scratch": {"--single-snapshot"},
"Dockerfile_test_maintainer": {"--single-snapshot"},
"Dockerfile_test_target": {"--target=second"},
"Dockerfile_test_snapshotter_ignorelist": {"--use-new-run=true", "-v=debug"},
"Dockerfile_test_snapshotter_ignorelist": {"--use-new-run=true", "-v=trace"},
}
// output check to do when building with kaniko
@ -98,8 +99,8 @@ var outputChecks = map[string]func(string, []byte) error{
}
for _, s := range []string{
"resolved symlink /hello to /dev/null",
"path /dev/null is ignored, ignoring it",
"Resolved symlink /hello to /dev/null",
"Path /dev/null is ignored, ignoring it",
} {
if !strings.Contains(string(out), s) {
return fmt.Errorf("output must contain %s", s)
@ -162,7 +163,7 @@ func GetVersionedKanikoImage(imageRepo, dockerfile string, version int) string {
func FindDockerFiles(dockerfilesPath string) ([]string, error) {
allDockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile_test*"))
if err != nil {
return []string{}, fmt.Errorf("Failed to find docker files at %s: %s", dockerfilesPath, err)
return []string{}, fmt.Errorf("Failed to find docker files at %s: %w", dockerfilesPath, err)
}
var dockerfiles []string
@ -218,7 +219,15 @@ func addServiceAccountFlags(flags []string, serviceAccount string) []string {
"GOOGLE_APPLICATION_CREDENTIALS=/secret/"+filepath.Base(serviceAccount),
"-v", filepath.Dir(serviceAccount)+":/secret/")
} else {
flags = append(flags, "-v", os.Getenv("HOME")+"/.config/gcloud:/root/.config/gcloud")
gcloudConfig := os.Getenv("HOME") + "/.config/gcloud"
if util.FilepathExists(gcloudConfig) {
flags = append(flags, "-v", gcloudConfig+":/root/.config/gcloud")
}
dockerConfig := os.Getenv("HOME") + "/.docker/config.json"
if util.FilepathExists(dockerConfig) {
flags = append(flags, "-v", dockerConfig+":/root/.docker/config.json", "-e", "DOCKER_CONFIG=/root/.docker")
}
}
return flags
}
@ -238,6 +247,7 @@ func (d *DockerFileBuilder) BuildDockerImage(t *testing.T, imageRepo, dockerfile
dockerArgs := []string{
"build",
"--no-cache",
"-t", dockerImage,
}
@ -255,7 +265,7 @@ func (d *DockerFileBuilder) BuildDockerImage(t *testing.T, imageRepo, dockerfile
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %w %s", dockerImage, dockerCmd.Args, err, string(out))
}
t.Logf("Build image for Dockerfile %s as %s. docker build output: %s \n", dockerfile, dockerImage, out)
return nil
@ -333,7 +343,7 @@ func populateVolumeCache() error {
)
if _, err := RunCommandWithoutTest(warmerCmd); err != nil {
return fmt.Errorf("Failed to warm kaniko cache: %s", err)
return fmt.Errorf("Failed to warm kaniko cache: %w", err)
}
return nil
@ -373,7 +383,7 @@ func (d *DockerFileBuilder) buildCachedImages(config *integrationTestConfig, cac
_, err := RunCommandWithoutTest(kanikoCmd)
if err != nil {
return fmt.Errorf("Failed to build cached image %s with kaniko command \"%s\": %s", kanikoImage, kanikoCmd.Args, err)
return fmt.Errorf("Failed to build cached image %s with kaniko command \"%s\": %w", kanikoImage, kanikoCmd.Args, err)
}
}
return nil
@ -399,7 +409,7 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi
out, err := RunCommandWithoutTest(dockerCmd)
timing.DefaultRun.Stop(timer)
if err != nil {
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %s %s", dockerImage, dockerCmd.Args, err, string(out))
return fmt.Errorf("Failed to build image %s with docker command \"%s\": %w %s", dockerImage, dockerCmd.Args, err, string(out))
}
dockerRunFlags := []string{"run", "--net=host", "-v", cwd + ":/workspace"}
@ -417,7 +427,7 @@ func (d *DockerFileBuilder) buildRelativePathsImage(imageRepo, dockerfile, servi
if err != nil {
return fmt.Errorf(
"Failed to build relative path image %s with kaniko command \"%s\": %s\n%s",
"Failed to build relative path image %s with kaniko command \"%s\": %w\n%s",
kanikoImage, kanikoCmd.Args, err, string(out))
}
@ -485,11 +495,11 @@ func buildKanikoImage(
out, err := RunCommandWithoutTest(kanikoCmd)
if err != nil {
return "", fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %s\n%s", kanikoImage, kanikoCmd.Args, err, string(out))
return "", fmt.Errorf("Failed to build image %s with kaniko command \"%s\": %w\n%s", kanikoImage, kanikoCmd.Args, err, string(out))
}
if outputCheck := outputChecks[dockerfile]; outputCheck != nil {
if err := outputCheck(dockerfile, out); err != nil {
return "", fmt.Errorf("Output check failed for image %s with kaniko command : %s\n%s", kanikoImage, err, string(out))
return "", fmt.Errorf("Output check failed for image %s with kaniko command : %w\n%s", kanikoImage, err, string(out))
}
}
return benchmarkDir, nil

View File

@ -496,14 +496,16 @@ func TestLayers(t *testing.T) {
offset := map[string]int{
"Dockerfile_test_add": 12,
"Dockerfile_test_scratch": 3,
// TODO: tejaldesai fix this!
"Dockerfile_test_meta_arg": 1,
"Dockerfile_test_copy_same_file_many_times": 47,
"Dockerfile_test_arg_multi_with_quotes": 1,
"Dockerfile_test_arg_multi": 1,
"Dockerfile_test_arg_blank_with_quotes": 1,
}
if os.Getenv("CI") == "true" {
// TODO: tejaldesai fix this!
// These files build locally with difference 0; on CI, docker
// produces a different number of layers (?).
offset["Dockerfile_test_copy_same_file_many_times"] = 47
offset["Dockerfile_test_meta_arg"] = 1
}
for _, dockerfile := range allDockerfiles {
t.Run("test_layer_"+dockerfile, func(t *testing.T) {
dockerfileTest := dockerfile
@ -531,6 +533,8 @@ func TestLayers(t *testing.T) {
}
func buildImage(t *testing.T, dockerfile string, imageBuilder *DockerFileBuilder) {
t.Logf("Building image '%v'...", dockerfile)
if err := imageBuilder.BuildImage(t, config, dockerfilesPath, dockerfile); err != nil {
t.Errorf("Error building image: %s", err)
t.FailNow()
@ -619,14 +623,15 @@ func TestExitCodePropagation(t *testing.T) {
t.Run("test error code propagation", func(t *testing.T) {
// building the image with docker should fail with exit code 42
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_exit_code_propagation")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
context})...)
_, kanikoErr := RunCommandWithoutTest(dockerCmd)
dockerFlags := []string{
"build",
"-t", dockerImage,
"-f", dockerfile}
dockerCmd := exec.Command("docker", append(dockerFlags, context)...)
out, kanikoErr := RunCommandWithoutTest(dockerCmd)
if kanikoErr == nil {
t.Fatal("docker build did not produce an error")
t.Fatalf("docker build did not produce an error:\n%s", out)
}
var dockerCmdExitErr *exec.ExitError
var dockerExitCode int
@ -634,32 +639,43 @@ func TestExitCodePropagation(t *testing.T) {
if errors.As(kanikoErr, &dockerCmdExitErr) {
dockerExitCode = dockerCmdExitErr.ExitCode()
testutil.CheckDeepEqual(t, 42, dockerExitCode)
if t.Failed() {
t.Fatalf("Output was:\n%s", out)
}
} else {
t.Fatalf("did not produce the expected error")
t.Fatalf("did not produce the expected error:\n%s", out)
}
// Try to build the same image with kaniko; the error code should match the one from the plain docker build
contextVolume := fmt.Sprintf("%s:/workspace", context)
dockerCmdWithKaniko := exec.Command("docker", append([]string{
dockerFlags = []string{
"run",
"-v", contextVolume,
ExecutorImage,
}
dockerFlags = addServiceAccountFlags(dockerFlags, "")
dockerFlags = append(dockerFlags, ExecutorImage,
"-c", "dir:///workspace/",
"-f", "./Dockerfile_exit_code_propagation",
"--no-push",
"--force", // TODO: detection of whether kaniko is being run inside a container might be broken?
})...)
)
_, kanikoErr = RunCommandWithoutTest(dockerCmdWithKaniko)
dockerCmdWithKaniko := exec.Command("docker", dockerFlags...)
out, kanikoErr = RunCommandWithoutTest(dockerCmdWithKaniko)
if kanikoErr == nil {
t.Fatal("the kaniko build did not produce the expected error")
t.Fatalf("the kaniko build did not produce the expected error:\n%s", out)
}
var kanikoExitErr *exec.ExitError
if errors.As(kanikoErr, &kanikoExitErr) {
testutil.CheckDeepEqual(t, dockerExitCode, kanikoExitErr.ExitCode())
if t.Failed() {
t.Fatalf("Output was:\n%s", out)
}
} else {
t.Fatalf("did not produce the expected error")
t.Fatalf("did not produce the expected error:\n%s", out)
}
})
}
@ -798,19 +814,19 @@ func checkLayers(t *testing.T, image1, image2 string, offset int) {
func getImageDetails(image string) (*imageDetails, error) {
ref, err := name.ParseReference(image, name.WeakValidation)
if err != nil {
return nil, fmt.Errorf("Couldn't parse referance to image %s: %s", image, err)
return nil, fmt.Errorf("Couldn't parse referance to image %s: %w", image, err)
}
imgRef, err := daemon.Image(ref)
if err != nil {
return nil, fmt.Errorf("Couldn't get reference to image %s from daemon: %s", image, err)
return nil, fmt.Errorf("Couldn't get reference to image %s from daemon: %w", image, err)
}
layers, err := imgRef.Layers()
if err != nil {
return nil, fmt.Errorf("Error getting layers for image %s: %s", image, err)
return nil, fmt.Errorf("Error getting layers for image %s: %w", image, err)
}
digest, err := imgRef.Digest()
if err != nil {
return nil, fmt.Errorf("Error getting digest for image %s: %s", image, err)
return nil, fmt.Errorf("Error getting digest for image %s: %w", image, err)
}
return &imageDetails{
name: image,

View File

@ -191,7 +191,7 @@ func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *docke
cr.layer = layers[0]
cr.extractedFiles, err = util.GetFSFromLayers(kConfig.RootDir, layers, util.ExtractFunc(cr.extractFn), util.IncludeWhiteout())
logrus.Debugf("extractedFiles: %s", cr.extractedFiles)
logrus.Debugf("ExtractedFiles: %s", cr.extractedFiles)
if err != nil {
return errors.Wrap(err, "extracting fs from image")
}

View File

@ -58,7 +58,7 @@ func setupTestTemp(t *testing.T) string {
srcPath, err := filepath.Abs("../../integration/context")
if err != nil {
logrus.Fatalf("error getting abs path %s", srcPath)
logrus.Fatalf("Error getting abs path %s", srcPath)
}
cperr := filepath.Walk(srcPath,
func(path string, info os.FileInfo, err error) error {
@ -98,7 +98,7 @@ func setupTestTemp(t *testing.T) string {
return nil
})
if cperr != nil {
logrus.Fatalf("error populating temp dir %s", cperr)
logrus.Fatalf("Error populating temp dir %s", cperr)
}
return tempDir
@ -301,7 +301,7 @@ func TestCopyExecuteCmd(t *testing.T) {
t.Error()
}
for _, file := range files {
logrus.Debugf("file: %v", file.Name())
logrus.Debugf("File: %v", file.Name())
dirList = append(dirList, file.Name())
}
} else {

View File

@ -34,7 +34,7 @@ type ExposeCommand struct {
}
func (r *ExposeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: EXPOSE")
logrus.Info("Cmd: EXPOSE")
// Grab the currently exposed ports
existingPorts := config.ExposedPorts
if existingPorts == nil {

View File

@ -30,8 +30,8 @@ type OnBuildCommand struct {
// ExecuteCommand adds the specified expression in Onbuild to the config
func (o *OnBuildCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: ONBUILD")
logrus.Infof("args: %s", o.cmd.Expression)
logrus.Info("Cmd: ONBUILD")
logrus.Infof("Args: %s", o.cmd.Expression)
if config.OnBuild == nil {
config.OnBuild = []string{o.cmd.Expression}
} else {

View File

@ -80,8 +80,8 @@ func runCommandInExec(config *v1.Config, buildArgs *dockerfile.BuildArgs, cmdRun
}
}
logrus.Infof("cmd: %s", newCommand[0])
logrus.Infof("args: %s", newCommand[1:])
logrus.Infof("Cmd: %s", newCommand[0])
logrus.Infof("Args: %s", newCommand[1:])
cmd := exec.Command(newCommand[0], newCommand[1:]...)

View File

@ -34,14 +34,14 @@ type RunMarkerCommand struct {
func (r *RunMarkerCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
// run command `touch filemarker`
logrus.Debugf("using new RunMarker command")
logrus.Debugf("Using new RunMarker command")
prevFilesMap, _ := util.GetFSInfoMap("/", map[string]os.FileInfo{})
if err := runCommandInExec(config, buildArgs, r.cmd); err != nil {
return err
}
_, r.Files = util.GetFSInfoMap("/", prevFilesMap)
logrus.Debugf("files changed %s", r.Files)
logrus.Debugf("Files changed %s", r.Files)
return nil
}

View File

@ -32,7 +32,7 @@ type StopSignalCommand struct {
// ExecuteCommand handles command processing similar to CMD and RUN,
func (s *StopSignalCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: STOPSIGNAL")
logrus.Info("Cmd: STOPSIGNAL")
// resolve possible environment variables
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)

View File

@ -39,7 +39,7 @@ type UserCommand struct {
}
func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: USER")
logrus.Info("Cmd: USER")
u := r.cmd.User
userAndGroup := strings.Split(u, ":")
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)

View File

@ -34,7 +34,7 @@ type VolumeCommand struct {
}
func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: VOLUME")
logrus.Info("Cmd: VOLUME")
volumes := v.cmd.Volumes
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, replacementEnvs, true)

View File

@ -38,7 +38,7 @@ type WorkdirCommand struct {
var mkdir = os.MkdirAll
func (w *WorkdirCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: workdir")
logrus.Info("Cmd: workdir")
workdirPath := w.cmd.Path
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, replacementEnvs, true)

View File

@ -35,7 +35,7 @@ func (b *multiArg) String() string {
// The second method is Set(value string) error
func (b *multiArg) Set(value string) error {
logrus.Debugf("appending to multi args %s", value)
logrus.Debugf("Appending to multi args %s", value)
*b = append(*b, value)
return nil
}

View File

@ -176,7 +176,7 @@ func extractValFromQuotes(val string) (string, error) {
}
if leader != tail {
logrus.Infof("leader %s tail %s", leader, tail)
logrus.Infof("Leader %s tail %s", leader, tail)
return "", errors.New("quotes wrapping arg values must be matched")
}

View File

@ -108,7 +108,7 @@ func newStageBuilder(args *dockerfile.BuildArgs, opts *config.KanikoOptions, sta
if err != nil {
return nil, err
}
l := snapshot.NewLayeredMap(hasher, util.CacheHasher())
l := snapshot.NewLayeredMap(hasher)
snapshotter := snapshot.NewSnapshotter(l, config.RootDir)
digest, err := sourceImage.Digest()
@ -222,7 +222,7 @@ func (s *stageBuilder) populateCopyCmdCompositeKey(command fmt.Stringer, from st
ds := digest
cacheKey, ok := s.digestToCacheKey[ds]
if ok {
logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
logrus.Debugf("Adding digest %v from previous stage to composite key for %v", ds, command.String())
compositeKey.AddKey(cacheKey)
}
}
@ -260,13 +260,13 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
return err
}
logrus.Debugf("optimize: composite key for command %v %v", command.String(), compositeKey)
logrus.Debugf("Optimize: composite key for command %v %v", command.String(), compositeKey)
ck, err := compositeKey.Hash()
if err != nil {
return errors.Wrap(err, "failed to hash composite key")
}
logrus.Debugf("optimize: cache key for command %v %v", command.String(), ck)
logrus.Debugf("Optimize: cache key for command %v %v", command.String(), ck)
s.finalCacheKey = ck
if command.ShouldCacheOutput() && !stopCache {
@ -395,7 +395,7 @@ func (s *stageBuilder) build() error {
timing.DefaultRun.Stop(t)
if !s.shouldTakeSnapshot(index, command.MetadataOnly()) && !s.opts.ForceBuildMetadata {
logrus.Debugf("build: skipping snapshot for [%v]", command.String())
logrus.Debugf("Build: skipping snapshot for [%v]", command.String())
continue
}
if isCacheCommand {
@ -411,13 +411,13 @@ func (s *stageBuilder) build() error {
}
if s.opts.Cache {
logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
logrus.Debugf("Build: composite key for command %v %v", command.String(), compositeKey)
ck, err := compositeKey.Hash()
if err != nil {
return errors.Wrap(err, "failed to hash composite key")
}
logrus.Debugf("build: cache key for command %v %v", command.String(), ck)
logrus.Debugf("Build: cache key for command %v %v", command.String(), ck)
// Push layer to cache (in parallel) now along with new config file
if command.ShouldCacheOutput() && !s.opts.NoPushCache {
@ -433,7 +433,7 @@ func (s *stageBuilder) build() error {
}
if err := cacheGroup.Wait(); err != nil {
logrus.Warnf("error uploading layer to cache: %s", err)
logrus.Warnf("Error uploading layer to cache: %s", err)
}
return nil
@ -623,7 +623,17 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
for index, stage := range kanikoStages {
sb, err := newStageBuilder(args, opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest, stageNameToIdx, fileContext)
sb, err := newStageBuilder(
args, opts, stage,
crossStageDependencies,
digestToCacheKey,
stageIdxToDigest,
stageNameToIdx,
fileContext)
logrus.Infof("Building stage '%v' [idx: '%v', base-idx: '%v']",
stage.BaseName, stage.Index, stage.BaseImageIndex)
args = sb.args
if err != nil {
@ -662,10 +672,10 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
}
stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String()
logrus.Debugf("mapping stage idx %v to digest %v", sb.stage.Index, d.String())
logrus.Debugf("Mapping stage idx %v to digest %v", sb.stage.Index, d.String())
digestToCacheKey[d.String()] = sb.finalCacheKey
logrus.Debugf("mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey)
logrus.Debugf("Mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey)
if stage.Final {
sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()})
@ -817,7 +827,7 @@ func extractImageToDependencyDir(name string, image v1.Image) error {
if err := os.MkdirAll(dependencyDir, 0755); err != nil {
return err
}
logrus.Debugf("trying to extract to %s", dependencyDir)
logrus.Debugf("Trying to extract to %s", dependencyDir)
_, err := util.GetFSFromImage(dependencyDir, image, util.ExtractFile)
return err
}

View File

@ -126,7 +126,7 @@ func writeDigestFile(path string, digestByteArray []byte) error {
parentDir := filepath.Dir(path)
if _, err := os.Stat(parentDir); os.IsNotExist(err) {
if err := os.MkdirAll(parentDir, 0700); err != nil {
logrus.Debugf("error creating %s, %s", parentDir, err)
logrus.Debugf("Error creating %s, %s", parentDir, err)
return err
}
logrus.Tracef("Created directory %v", parentDir)

View File

@ -42,7 +42,7 @@ func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []strin
for _, f := range paths {
// If the given path is part of the ignorelist ignore it
if util.IsInProvidedIgnoreList(f, wl) {
logrus.Debugf("path %s is in list to ignore, ignoring it", f)
logrus.Debugf("Path %s is in list to ignore, ignoring it", f)
continue
}
@ -52,7 +52,7 @@ func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []strin
}
if f != link {
logrus.Tracef("updated link %s to %s", f, link)
logrus.Tracef("Updated link %s to %s", f, link)
}
if !fileSet[link] {
@ -67,21 +67,21 @@ func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []strin
evaled, e = filepath.EvalSymlinks(f)
if e != nil {
if !os.IsNotExist(e) {
logrus.Errorf("couldn't eval %s with link %s", f, link)
logrus.Errorf("Couldn't eval %s with link %s", f, link)
return
}
logrus.Debugf("symlink path %s, target does not exist", f)
logrus.Tracef("Symlink path %s, target does not exist", f)
continue
}
if f != evaled {
logrus.Debugf("resolved symlink %s to %s", f, evaled)
logrus.Tracef("Resolved symlink %s to %s", f, evaled)
}
// If the given path is a symlink and the target is part of the ignorelist
// ignore the target
if util.CheckProvidedIgnoreList(evaled, wl) {
logrus.Debugf("path %s is ignored, ignoring it", evaled)
logrus.Debugf("Path %s is ignored, ignoring it", evaled)
continue
}

View File

@ -20,129 +20,179 @@ import (
"bytes"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
type LayeredMap struct {
layers []map[string]string
whiteouts []map[string]struct{}
adds []map[string]string // All layers with added files.
deletes []map[string]struct{} // All layers with deleted files.
currentImage map[string]string // All files and hashes in the current image (up to the last layer).
isCurrentImageValid bool // Whether currentImage is up-to-date.
layerHashCache map[string]string
hasher func(string) (string, error)
// cacheHasher doesn't include mtime in its hash so that filesystem cache keys are stable
cacheHasher func(string) (string, error)
}
func NewLayeredMap(h func(string) (string, error), c func(string) (string, error)) *LayeredMap {
// NewLayeredMap creates a new layered map which keeps track of adds and deletes.
func NewLayeredMap(h func(string) (string, error)) *LayeredMap {
l := LayeredMap{
hasher: h,
cacheHasher: c,
hasher: h,
}
l.layers = []map[string]string{}
l.currentImage = map[string]string{}
l.layerHashCache = map[string]string{}
return &l
}
// Snapshot creates a new layer.
func (l *LayeredMap) Snapshot() {
l.whiteouts = append(l.whiteouts, map[string]struct{}{})
l.layers = append(l.layers, map[string]string{})
// Save current state of image
l.updateCurrentImage()
l.adds = append(l.adds, map[string]string{})
l.deletes = append(l.deletes, map[string]struct{}{})
l.layerHashCache = map[string]string{} // Erase the hash cache for this new layer.
}
// Key returns a hash for added files
// Key returns a hash for added and deleted files.
func (l *LayeredMap) Key() (string, error) {
var adds map[string]string
var deletes map[string]struct{}
if len(l.adds) != 0 {
adds = l.adds[len(l.adds)-1]
deletes = l.deletes[len(l.deletes)-1]
}
c := bytes.NewBuffer([]byte{})
enc := json.NewEncoder(c)
enc.Encode(l.layers)
err := enc.Encode(adds)
if err != nil {
return "", err
}
err = enc.Encode(deletes)
if err != nil {
return "", err
}
return util.SHA256(c)
}
// getFlattenedPaths returns all existing paths in the current FS
func (l *LayeredMap) getFlattenedPaths() map[string]struct{} {
// getCurrentImage returns the current image by merging the latest
// adds and deletes onto the current image (if it's not yet valid).
func (l *LayeredMap) getCurrentImage() map[string]string {
if l.isCurrentImageValid || len(l.adds) == 0 {
// No layers yet or current image is valid.
return l.currentImage
}
current := map[string]string{}
// Copy current image paths/hashes.
for p, h := range l.currentImage {
current[p] = h
}
// Add the last layer on top.
addedFiles := l.adds[len(l.adds)-1]
deletedFiles := l.deletes[len(l.deletes)-1]
for add, hash := range addedFiles {
current[add] = hash
}
for del := range deletedFiles {
delete(current, del)
}
return current
}
// updateCurrentImage updates the internal current image by merging the
// top adds and deletes onto the current image.
func (l *LayeredMap) updateCurrentImage() {
if l.isCurrentImageValid {
return
}
l.currentImage = l.getCurrentImage()
l.isCurrentImageValid = true
}
// get returns the hash for the given path in the current image `l.currentImage`.
func (l *LayeredMap) get(s string) (string, bool) {
h, ok := l.currentImage[s]
return h, ok
}
// GetCurrentPaths returns all existing paths in the current image,
// as computed by getCurrentImage.
func (l *LayeredMap) GetCurrentPaths() map[string]struct{} {
current := l.getCurrentImage()
paths := map[string]struct{}{}
for _, l := range l.layers {
for p := range l {
basename := filepath.Base(p)
if strings.HasPrefix(basename, archive.WhiteoutPrefix) {
deletedFile := filepath.Join(filepath.Dir(p), strings.TrimPrefix(basename, archive.WhiteoutPrefix))
delete(paths, deletedFile)
} else {
paths[p] = struct{}{}
}
}
for f := range current {
paths[f] = struct{}{}
}
return paths
}
func (l *LayeredMap) Get(s string) (string, bool) {
for i := len(l.layers) - 1; i >= 0; i-- {
if v, ok := l.layers[i][s]; ok {
return v, ok
}
}
return "", false
// AddDelete marks the specified file as deleted in the current layer.
func (l *LayeredMap) AddDelete(s string) error {
l.isCurrentImageValid = false
l.deletes[len(l.deletes)-1][s] = struct{}{}
return nil
}
func (l *LayeredMap) GetWhiteout(s string) bool {
for i := len(l.whiteouts) - 1; i >= 0; i-- {
if _, ok := l.whiteouts[i][s]; ok {
return ok
}
}
return false
}
func (l *LayeredMap) MaybeAddWhiteout(s string) bool {
ok := l.GetWhiteout(s)
if ok {
return false
}
l.whiteouts[len(l.whiteouts)-1][s] = struct{}{}
return true
}
// Add will add the specified file s to the layered map.
// Add will add the specified file s to the current layer.
func (l *LayeredMap) Add(s string) error {
l.isCurrentImageValid = false
// Use hash function and add to layers
newV, err := func(s string) (string, error) {
if v, ok := l.layerHashCache[s]; ok {
// clear its cache entry for the next layer.
delete(l.layerHashCache, s)
return v, nil
}
return l.hasher(s)
}(s)
if err != nil {
return fmt.Errorf("error creating hash for %s: %v", s, err)
return fmt.Errorf("Error creating hash for %s: %w", s, err)
}
l.layers[len(l.layers)-1][s] = newV
l.adds[len(l.adds)-1][s] = newV
return nil
}
// CheckFileChange checks whether a given file changed
// CheckFileChange checks whether a given file (needs to exist) changed
// from the current layered map by its hashing function.
// A file that no longer exists is reported as unchanged.
// Returns true if the file is changed.
func (l *LayeredMap) CheckFileChange(s string) (bool, error) {
t := timing.Start("Hashing files")
defer timing.DefaultRun.Stop(t)
newV, err := l.hasher(s)
if err != nil {
// if this file does not exist in the new layer return.
if os.IsNotExist(err) {
logrus.Tracef("%s detected as changed but does not exist", s)
return false, nil
}
return false, err
}
// Save hash to not recompute it when
// adding the file.
l.layerHashCache[s] = newV
oldV, ok := l.Get(s)
oldV, ok := l.get(s)
if ok && newV == oldV {
// File hash did not change => Unchanged.
return false, nil
}
// File does not exist in current image,
// or it did change => Changed.
return true, nil
}
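
A hedged usage sketch of the resulting API (hypothetical paths; `Add` hashes the file on disk, so it must exist): after a fresh `Snapshot()`, `Key()` covers only the newly opened layer's adds and deletes, which is the "Merge only last layer onto `currentImage`" fix from the commit message.

```go
package main

import (
	"fmt"

	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	lm := snapshot.NewLayeredMap(util.Hasher())

	lm.Snapshot()                 // open layer 0
	_ = lm.Add("/etc/hostname")   // record an add (hashed from disk)
	_ = lm.AddDelete("/tmp/gone") // record a delete (future whiteout)
	k1, _ := lm.Key()

	lm.Snapshot() // layer 0 merges into currentImage; layer 1 starts empty
	k2, _ := lm.Key()

	fmt.Println(k1 == k2) // false: Key() hashes only the top layer
}
```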

View File

@ -61,8 +61,8 @@ func Test_CacheKey(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
lm1 := LayeredMap{layers: []map[string]string{test.map1}}
lm2 := LayeredMap{layers: []map[string]string{test.map2}}
lm1 := LayeredMap{adds: []map[string]string{test.map1}, deletes: []map[string]struct{}{nil, nil}}
lm2 := LayeredMap{adds: []map[string]string{test.map2}, deletes: []map[string]struct{}{nil, nil}}
k1, err := lm1.Key()
if err != nil {
t.Fatalf("error getting key for map 1: %v", err)
@ -77,3 +77,67 @@ func Test_CacheKey(t *testing.T) {
})
}
}
func Test_FlattenPaths(t *testing.T) {
layers := []map[string]string{
{
"a": "2",
"b": "3",
},
{
"b": "5",
"c": "6",
},
{
"a": "8",
},
}
whiteouts := []map[string]struct{}{
{
"a": {}, // delete a
},
{
"b": {}, // delete b
},
{
"c": {}, // delete c
},
}
lm := LayeredMap{
adds: []map[string]string{layers[0]},
deletes: []map[string]struct{}{whiteouts[0]}}
paths := lm.GetCurrentPaths()
assertPath := func(f string, exists bool) {
_, ok := paths[f]
if exists && !ok {
t.Fatalf("expected path '%s' to be present.", f)
} else if !exists && ok {
t.Fatalf("expected path '%s' not to be present.", f)
}
}
assertPath("a", false)
assertPath("b", true)
lm = LayeredMap{
adds: []map[string]string{layers[0], layers[1]},
deletes: []map[string]struct{}{whiteouts[0], whiteouts[1]}}
paths = lm.GetCurrentPaths()
assertPath("a", false)
assertPath("b", false)
assertPath("c", true)
lm = LayeredMap{
adds: []map[string]string{layers[0], layers[1], layers[2]},
deletes: []map[string]struct{}{whiteouts[0], whiteouts[1], whiteouts[2]}}
paths = lm.GetCurrentPaths()
assertPath("a", true)
assertPath("b", false)
assertPath("c", false)
}

View File

@ -49,6 +49,7 @@ func NewSnapshotter(l *LayeredMap, d string) *Snapshotter {
// Init initializes a new snapshotter
func (s *Snapshotter) Init() error {
logrus.Info("Initializing snapshotter ...")
_, _, err := s.scanFullFilesystem()
return err
}
@ -75,41 +76,39 @@ func (s *Snapshotter) TakeSnapshot(files []string, shdCheckDelete bool, forceBui
filesToAdd, err := filesystem.ResolvePaths(files, s.ignorelist)
if err != nil {
return "", nil
return "", err
}
logrus.Info("Taking snapshot of files...")
logrus.Debugf("Taking snapshot of files %v", filesToAdd)
sort.Strings(filesToAdd)
logrus.Debugf("Adding to layer: %v", filesToAdd)
// Add files to the layered map
// Add files to current layer.
for _, file := range filesToAdd {
if err := s.l.Add(file); err != nil {
return "", fmt.Errorf("unable to add file %s to layered map: %s", file, err)
return "", fmt.Errorf("Unable to add file %s to layered map: %w", file, err)
}
}
// Get whiteout paths
filesToWhiteout := []string{}
var filesToWhiteout []string
if shdCheckDelete {
_, deletedFiles := util.WalkFS(s.directory, s.l.getFlattenedPaths(), func(s string) (bool, error) {
_, deletedFiles := util.WalkFS(s.directory, s.l.GetCurrentPaths(), func(s string) (bool, error) {
return true, nil
})
// The paths left here are the ones that have been deleted in this layer.
for path := range deletedFiles {
// Only add the whiteout if the directory for the file still exists.
dir := filepath.Dir(path)
if _, ok := deletedFiles[dir]; !ok {
if s.l.MaybeAddWhiteout(path) {
logrus.Debugf("Adding whiteout for %s", path)
filesToWhiteout = append(filesToWhiteout, path)
}
logrus.Debugf("Deleting in layer: %v", deletedFiles)
// Whiteout files in current layer.
for file := range deletedFiles {
if err := s.l.AddDelete(file); err != nil {
return "", fmt.Errorf("Unable to whiteout file %s in layered map: %w", file, err)
}
}
}
sort.Strings(filesToWhiteout)
filesToWhiteout = removeObsoleteWhiteouts(deletedFiles)
sort.Strings(filesToWhiteout)
}
t := util.NewTar(f)
defer t.Close()
@ -159,7 +158,9 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
s.l.Snapshot()
changedPaths, deletedPaths := util.WalkFS(s.directory, s.l.getFlattenedPaths(), s.l.CheckFileChange)
logrus.Debugf("Current image filesystem: %v", s.l.currentImage)
changedPaths, deletedPaths := util.WalkFS(s.directory, s.l.GetCurrentPaths(), s.l.CheckFileChange)
timer := timing.Start("Resolving Paths")
filesToAdd := []string{}
@ -169,41 +170,55 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
}
for _, path := range resolvedFiles {
if util.CheckIgnoreList(path) {
logrus.Tracef("Not adding %s to layer, as it's ignored", path)
logrus.Debugf("Not adding %s to layer, as it's ignored", path)
continue
}
filesToAdd = append(filesToAdd, path)
}
// The paths left here are the ones that have been deleted in this layer.
filesToWhiteOut := []string{}
for path := range deletedPaths {
// Only add the whiteout if the directory for the file still exists.
dir := filepath.Dir(path)
if _, ok := deletedPaths[dir]; !ok {
if s.l.MaybeAddWhiteout(path) {
logrus.Debugf("Adding whiteout for %s", path)
filesToWhiteOut = append(filesToWhiteOut, path)
}
}
}
timing.DefaultRun.Stop(timer)
sort.Strings(filesToAdd)
sort.Strings(filesToWhiteOut)
logrus.Debugf("Adding to layer: %v", filesToAdd)
logrus.Debugf("Deleting in layer: %v", deletedPaths)
// Add files to the layered map
for _, file := range filesToAdd {
if err := s.l.Add(file); err != nil {
return nil, nil, fmt.Errorf("unable to add file %s to layered map: %s", file, err)
return nil, nil, fmt.Errorf("Unable to add file %s to layered map: %w", file, err)
}
}
return filesToAdd, filesToWhiteOut, nil
for file := range deletedPaths {
if err := s.l.AddDelete(file); err != nil {
return nil, nil, fmt.Errorf("Unable to whiteout file %s in layered map: %w", file, err)
}
}
filesToWhiteout := removeObsoleteWhiteouts(deletedPaths)
timing.DefaultRun.Stop(timer)
sort.Strings(filesToAdd)
sort.Strings(filesToWhiteout)
return filesToAdd, filesToWhiteout, nil
}
// removeObsoleteWhiteouts filters deleted files according to their parent's delete status.
func removeObsoleteWhiteouts(deletedFiles map[string]struct{}) (filesToWhiteout []string) {
for path := range deletedFiles {
// Only add the whiteout if the directory for the file still exists.
dir := filepath.Dir(path)
if _, ok := deletedFiles[dir]; !ok {
logrus.Tracef("Adding whiteout for %s", path)
filesToWhiteout = append(filesToWhiteout, path)
}
}
return filesToWhiteout
}
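
For intuition, a hypothetical input for `removeObsoleteWhiteouts`: a whiteout for a child of an already-deleted directory is redundant, because the parent's whiteout removes the whole subtree.

```go
deleted := map[string]struct{}{
	"/foo":     {}, // directory deleted -> whiteout kept
	"/foo/bar": {}, // parent "/foo" also deleted -> whiteout dropped
	"/baz":     {}, // parent "/" not deleted -> whiteout kept
}
// removeObsoleteWhiteouts(deleted) returns {"/foo", "/baz"} in
// unspecified order; callers sort the slice afterwards.
```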
func writeToTar(t util.Tar, files, whiteouts []string) error {
timer := timing.Start("Writing tar file")
defer timing.DefaultRun.Stop(timer)
// Now create the tar.
for _, path := range whiteouts {
if err := t.Whiteout(path); err != nil {

View File

@ -585,7 +585,7 @@ func setUpTest(t *testing.T) (string, *Snapshotter, func(), error) {
snapshotPathPrefix = snapshotPath
// Take the initial snapshot
l := NewLayeredMap(util.Hasher(), util.CacheHasher())
l := NewLayeredMap(util.Hasher())
snapshotter := NewSnapshotter(l, testDir)
if err := snapshotter.Init(); err != nil {
return "", nil, nil, errors.Wrap(err, "initializing snapshotter")

View File

@ -178,17 +178,17 @@ func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, e
dir := filepath.Dir(path)
if strings.HasPrefix(base, archive.WhiteoutPrefix) {
logrus.Debugf("Whiting out %s", path)
logrus.Tracef("Whiting out %s", path)
name := strings.TrimPrefix(base, archive.WhiteoutPrefix)
path := filepath.Join(dir, name)
if CheckIgnoreList(path) {
logrus.Debugf("Not deleting %s, as it's ignored", path)
logrus.Tracef("Not deleting %s, as it's ignored", path)
continue
}
if childDirInIgnoreList(path) {
logrus.Debugf("Not deleting %s, as it contains a ignored path", path)
logrus.Tracef("Not deleting %s, as it contains a ignored path", path)
continue
}
@ -197,7 +197,7 @@ func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, e
}
if !cfg.includeWhiteout {
logrus.Debug("not including whiteout files")
logrus.Trace("Not including whiteout files")
continue
}
@ -301,13 +301,13 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
}
switch hdr.Typeflag {
case tar.TypeReg:
logrus.Tracef("creating file %s", path)
logrus.Tracef("Creating file %s", path)
// It's possible a file is in the tar before its directory,
// or a file was copied over a directory prior to now
fi, err := os.Stat(dir)
if os.IsNotExist(err) || !fi.IsDir() {
logrus.Debugf("base %s for file %s does not exist. Creating.", base, path)
logrus.Debugf("Base %s for file %s does not exist. Creating.", base, path)
if err := os.MkdirAll(dir, 0755); err != nil {
return err
@ -345,19 +345,19 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
currFile.Close()
case tar.TypeDir:
logrus.Tracef("creating dir %s", path)
logrus.Tracef("Creating dir %s", path)
if err := mkdirAllWithPermissions(path, mode, int64(uid), int64(gid)); err != nil {
return err
}
case tar.TypeLink:
logrus.Tracef("link from %s to %s", hdr.Linkname, path)
logrus.Tracef("Link from %s to %s", hdr.Linkname, path)
abs, err := filepath.Abs(hdr.Linkname)
if err != nil {
return err
}
if CheckIgnoreList(abs) {
logrus.Tracef("skipping symlink from %s to %s because %s is ignored", hdr.Linkname, path, hdr.Linkname)
logrus.Tracef("Skipping link from %s to %s because %s is ignored", hdr.Linkname, path, hdr.Linkname)
return nil
}
// The base directory for a link may not exist before it is created.
@ -377,7 +377,7 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
}
case tar.TypeSymlink:
logrus.Tracef("symlink from %s to %s", hdr.Linkname, path)
logrus.Tracef("Symlink from %s to %s", hdr.Linkname, path)
// The base directory for a symlink may not exist before it is created.
if err := os.MkdirAll(dir, 0755); err != nil {
return err
@ -559,7 +559,7 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid
// AddVolumePath adds the given path to the volume ignorelist.
func AddVolumePathToIgnoreList(path string) {
logrus.Infof("adding volume %s to ignorelist", path)
logrus.Infof("Adding volume %s to ignorelist", path)
ignorelist = append(ignorelist, IgnoreListEntry{
Path: path,
PrefixMatchOnly: true,
@ -667,7 +667,7 @@ func CopySymlink(src, dest string, context FileContext) (bool, error) {
}
link, err := os.Readlink(src)
if err != nil {
logrus.Debugf("could not read link for %s", src)
logrus.Debugf("Could not read link for %s", src)
}
return false, os.Symlink(link, dest)
}
@ -733,13 +733,13 @@ func (c FileContext) ExcludesFile(path string) bool {
var err error
path, err = filepath.Rel(c.Root, path)
if err != nil {
logrus.Errorf("unable to get relative path, including %s in build: %v", path, err)
logrus.Errorf("Unable to get relative path, including %s in build: %v", path, err)
return false
}
}
match, err := fileutils.Matches(path, c.ExcludedFiles)
if err != nil {
logrus.Errorf("error matching, including %s in build: %v", path, err)
logrus.Errorf("Error matching, including %s in build: %v", path, err)
return false
}
return match
@ -779,7 +779,7 @@ func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int64) erro
// Check if a file already exists on the path, if yes then delete it
info, err := os.Stat(path)
if err == nil && !info.IsDir() {
logrus.Tracef("removing file because it needs to be a directory %s", path)
logrus.Tracef("Removing file because it needs to be a directory %s", path)
if err := os.Remove(path); err != nil {
return errors.Wrapf(err, "error removing %s to make way for new directory.", path)
}
@ -817,12 +817,12 @@ func setFileTimes(path string, aTime, mTime time.Time) error {
// converted into a valid argument to the syscall that os.Chtimes uses. If mTime or
// aTime are zero we convert them to the zero value for Unix Epoch.
if mTime.IsZero() {
logrus.Tracef("mod time for %s is zero, converting to zero for epoch", path)
logrus.Tracef("Mod time for %s is zero, converting to zero for epoch", path)
mTime = time.Unix(0, 0)
}
if aTime.IsZero() {
logrus.Tracef("access time for %s is zero, converting to zero for epoch", path)
logrus.Tracef("Access time for %s is zero, converting to zero for epoch", path)
aTime = time.Unix(0, 0)
}
@ -845,7 +845,7 @@ func setFileTimes(path string, aTime, mTime time.Time) error {
func CreateTargetTarfile(tarpath string) (*os.File, error) {
baseDir := filepath.Dir(tarpath)
if _, err := os.Lstat(baseDir); os.IsNotExist(err) {
logrus.Debugf("baseDir %s for file %s does not exist. Creating.", baseDir, tarpath)
logrus.Debugf("BaseDir %s for file %s does not exist. Creating.", baseDir, tarpath)
if err := os.MkdirAll(baseDir, 0755); err != nil {
return nil, err
}
@ -963,12 +963,12 @@ func CopyOwnership(src string, destDir string, root string) error {
func createParentDirectory(path string) error {
baseDir := filepath.Dir(path)
if info, err := os.Lstat(baseDir); os.IsNotExist(err) {
logrus.Tracef("baseDir %s for file %s does not exist. Creating.", baseDir, path)
logrus.Tracef("BaseDir %s for file %s does not exist. Creating.", baseDir, path)
if err := os.MkdirAll(baseDir, 0755); err != nil {
return err
}
} else if IsSymlink(info) {
logrus.Infof("destination cannot be a symlink %v", baseDir)
logrus.Infof("Destination cannot be a symlink %v", baseDir)
return errors.New("destination cannot be a symlink")
}
return nil
@ -995,22 +995,28 @@ type walkFSResult struct {
existingPaths map[string]struct{}
}
// WalkFS given a directory and list of existing files,
// returns a list of changed files determined by changeFunc and a list
// of deleted files.
// It times out after 90 mins. Can be configured via setting an environment variable
// WalkFS given a directory dir and a list of existing files existingPaths,
// returns a list of changed files determined by `changeFunc` and a list
// of deleted files. The input existingPaths is modified inside this function
// and returned as the map of deleted files.
// It times out after 90 mins, which can be configured by setting the environment variable
// SNAPSHOT_TIMEOUT in the kaniko pod definition.
func WalkFS(dir string, existingPaths map[string]struct{}, changeFunc func(string) (bool, error)) ([]string, map[string]struct{}) {
func WalkFS(
dir string,
existingPaths map[string]struct{},
changeFunc func(string) (bool, error)) ([]string, map[string]struct{}) {
timeOutStr := os.Getenv(snapshotTimeout)
if timeOutStr == "" {
logrus.Tracef("%s environment not set. Using default snapshot timeout %s", snapshotTimeout, defaultTimeout)
logrus.Tracef("Environment '%s' not set. Using default snapshot timeout '%s'", snapshotTimeout, defaultTimeout)
timeOutStr = defaultTimeout
}
timeOut, err := time.ParseDuration(timeOutStr)
if err != nil {
logrus.Fatalf("could not parse duration %s", timeOutStr)
logrus.Fatalf("Could not parse duration '%s'", timeOutStr)
}
timer := timing.Start("Walking filesystem with timeout")
ch := make(chan walkFSResult, 1)
go func() {
@ -1024,35 +1030,45 @@ func WalkFS(dir string, existingPaths map[string]struct{}, changeFunc func(strin
return res.filesAdded, res.existingPaths
case <-time.After(timeOut):
timing.DefaultRun.Stop(timer)
logrus.Fatalf("timed out snapshotting FS in %s", timeOutStr)
logrus.Fatalf("Timed out snapshotting FS in %s", timeOutStr)
return nil, nil
}
}
func gowalkDir(dir string, existingPaths map[string]struct{}, changeFunc func(string) (bool, error)) walkFSResult {
foundPaths := make([]string, 0)
godirwalk.Walk(dir, &godirwalk.Options{
Callback: func(path string, ent *godirwalk.Dirent) error {
logrus.Tracef("Analyzing path %s", path)
if IsInIgnoreList(path) {
if IsDestDir(path) {
logrus.Tracef("Skipping paths under %s, as it is a ignored directory", path)
return filepath.SkipDir
}
return nil
}
delete(existingPaths, path)
if t, err := changeFunc(path); err != nil {
return err
} else if t {
foundPaths = append(foundPaths, path)
deletedFiles := existingPaths // Make a reference.
callback := func(path string, ent *godirwalk.Dirent) error {
logrus.Tracef("Analyzing path '%s'", path)
if IsInIgnoreList(path) {
if IsDestDir(path) {
logrus.Tracef("Skipping paths under '%s', as it is an ignored directory", path)
return filepath.SkipDir
}
return nil
},
Unsorted: true,
},
)
return walkFSResult{foundPaths, existingPaths}
}
// File is existing on disk, remove it from deleted files.
delete(deletedFiles, path)
if isChanged, err := changeFunc(path); err != nil {
return err
} else if isChanged {
foundPaths = append(foundPaths, path)
}
return nil
}
godirwalk.Walk(dir,
&godirwalk.Options{
Callback: callback,
Unsorted: true,
})
return walkFSResult{foundPaths, deletedFiles}
}
// GetFSInfoMap given a directory gets a map of FileInfo for all files

View File

@ -43,7 +43,7 @@ type group struct {
// groupIDs returns all of the group ID's a user is a member of
func groupIDs(u *user.User) ([]string, error) {
logrus.Infof("performing slow lookup of group ids for %s", u.Username)
logrus.Infof("Performing slow lookup of group ids for %s", u.Username)
f, err := os.Open(groupFile)
if err != nil {

View File

@ -34,7 +34,7 @@ func SyscallCredentials(userStr string) (*syscall.Credential, error) {
if err != nil {
return nil, errors.Wrap(err, "lookup")
}
logrus.Infof("util.Lookup returned: %+v", u)
logrus.Infof("Util.Lookup returned: %+v", u)
var groups []uint32

View File

@ -70,7 +70,7 @@ func (t *Tar) AddFileToTar(p string) error {
}
}
if i.Mode()&os.ModeSocket != 0 {
logrus.Infof("ignoring socket %s, not adding to tar", i.Name())
logrus.Infof("Ignoring socket %s, not adding to tar", i.Name())
return nil
}
hdr, err := tar.FileInfoHeader(i, linkDst)

View File

@ -71,6 +71,12 @@ func Hasher() func(string) (string, error) {
if _, err := io.CopyBuffer(h, f, *buf); err != nil {
return "", err
}
} else if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
linkPath, err := os.Readlink(p)
if err != nil {
return "", err
}
h.Write([]byte(linkPath))
}
return hex.EncodeToString(h.Sum(nil)), nil
@ -101,6 +107,12 @@ func CacheHasher() func(string) (string, error) {
if _, err := io.Copy(h, f); err != nil {
return "", err
}
} else if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
linkPath, err := os.Readlink(p)
if err != nil {
return "", err
}
h.Write([]byte(linkPath))
}
return hex.EncodeToString(h.Sum(nil)), nil
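
The symlink branches added above fold a link's target path into its hash. A small hypothetical demonstration with `CacheHasher` (which ignores mtime, so retargeting a link was previously invisible to it; error handling elided):

```go
package main

import (
	"fmt"
	"os"

	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	link := "/tmp/kaniko-symlink-demo" // hypothetical path
	hash := util.CacheHasher()

	os.Symlink("releases/v1", link)
	before, _ := hash(link)

	os.Remove(link)
	os.Symlink("releases/v2", link)
	after, _ := hash(link)

	// Previously both hashes matched (only mode/uid/gid are hashed for
	// non-regular files); with this commit the Readlink target is included.
	fmt.Println(before == after) // false
}
```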

View File

@ -253,7 +253,7 @@ func getStatFileContentUint64(filePath string) uint64 {
res, err := parseUint(trimmed, 10, 64)
if err != nil {
logrus.Errorf("unable to parse %q as a uint from Cgroup file %q", string(contents), filePath)
logrus.Errorf("Unable to parse %q as a uint from Cgroup file %q", string(contents), filePath)
return res
}

View File

@ -132,7 +132,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
for _, fileToRemove := range filesToRemove {
if rm, _ := fileutils.Matches(fileToRemove, excludes); rm {
if err := c.Remove(fileToRemove); err != nil {
logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
logrus.Errorf("Failed to remove %s: %v", fileToRemove, err)
}
}
}

View File

@ -681,7 +681,7 @@ func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
if iop.Stdin != nil {
if err := iop.Stdin.Close(); err != nil {
logrus.Warnf("error closing stdin: %+v", err)
logrus.Warnf("Error closing stdin: %+v", err)
}
}
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package container // import "github.com/docker/docker/container"
@ -144,7 +145,7 @@ func (container *Container) CopyImagePathContent(v volume.Volume, destination st
defer func() {
if err := v.Unmount(id); err != nil {
logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err)
logrus.Warnf("Error while unmounting volume %s: %v", v.Name(), err)
}
}()
if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {

View File

@ -63,8 +63,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
// Connect stdin of container to the attach stdin stream.
if cfg.Stdin != nil {
group.Go(func() error {
logrus.Debug("attach: stdin: begin")
defer logrus.Debug("attach: stdin: end")
logrus.Debug("Attach: stdin: begin")
defer logrus.Debug("Attach: stdin: end")
defer func() {
if cfg.CloseStdin && !cfg.TTY {
@ -98,8 +98,8 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
}
attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error {
logrus.Debugf("attach: %s: begin", name)
defer logrus.Debugf("attach: %s: end", name)
logrus.Debugf("Attach: %s: begin", name)
defer logrus.Debugf("Attach: %s: end", name)
defer func() {
// Make sure stdin gets closed
if cfg.Stdin != nil {
@ -132,7 +132,7 @@ func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan erro
errs := make(chan error, 1)
go func() {
defer logrus.Debug("attach done")
defer logrus.Debug("Attach done")
groupErr := make(chan error, 1)
go func() {
groupErr <- group.Wait()

View File

@ -122,7 +122,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
c.wg.Add(1)
go func() {
if _, err := pools.Copy(w, r); err != nil {
logrus.Errorf("stream copy error: %v", err)
logrus.Errorf("Stream copy error: %v", err)
}
r.Close()
c.wg.Done()
@ -141,7 +141,7 @@ func (c *Config) CopyToPipe(iop *cio.DirectIO) {
go func() {
pools.Copy(iop.Stdin, stdin)
if err := iop.Stdin.Close(); err != nil {
logrus.Warnf("failed to close stdin: %v", err)
logrus.Warnf("Failed to close stdin: %v", err)
}
}()
}

View File

@ -382,7 +382,7 @@ func (v *memdbView) transform(container *Container) *Snapshot {
for port, bindings := range container.NetworkSettings.Ports {
p, err := nat.ParsePort(port.Port())
if err != nil {
logrus.Warnf("invalid port map %+v", err)
logrus.Warnf("Invalid port map %+v", err)
continue
}
if len(bindings) == 0 {
@ -395,7 +395,7 @@ func (v *memdbView) transform(container *Container) *Snapshot {
for _, binding := range bindings {
h, err := nat.ParsePort(binding.HostPort)
if err != nil {
logrus.Warnf("invalid host port map %+v", err)
logrus.Warnf("Invalid host port map %+v", err)
continue
}
snapshot.Ports = append(snapshot.Ports, types.Port{

View File

@ -71,7 +71,7 @@ func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
if iop.Stdin != nil {
if err := iop.Stdin.Close(); err != nil {
logrus.Errorf("error closing exec stdin: %+v", err)
logrus.Errorf("Error closing exec stdin: %+v", err)
}
}
}

View File

@ -293,7 +293,7 @@ func IsInitialized(driverHome string) bool {
return false
}
if err != nil {
logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err)
logrus.Warnf("Graphdriver.IsInitialized: stat failed: %v", err)
}
return !isEmptyDir(driverHome)
}

View File

@ -85,7 +85,7 @@ func (fl *follow) waitRead() error {
}
return errRetry
case err := <-fl.fileWatcher.Errors():
logrus.Debugf("logger got error watching file: %v", err)
logrus.Debugf("Logger got error watching file: %v", err)
// Something happened, let's try and stay alive and create a new watcher
if fl.retries <= 5 {
fl.fileWatcher.Close()

View File

@ -76,7 +76,7 @@ func (s *fs) Walk(f DigestWalkFunc) error {
for _, v := range dir {
dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
if err := dgst.Validate(); err != nil {
logrus.Debugf("skipping invalid digest %s: %s", dgst, err)
logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
continue
}
if err := f(dgst); err != nil {

View File

@ -69,19 +69,19 @@ func (is *store) restore() error {
err := is.fs.Walk(func(dgst digest.Digest) error {
img, err := is.Get(IDFromDigest(dgst))
if err != nil {
logrus.Errorf("invalid image %v, %v", dgst, err)
logrus.Errorf("Invalid image %v, %v", dgst, err)
return nil
}
var l layer.Layer
if chainID := img.RootFS.ChainID(); chainID != "" {
if !system.IsOSSupported(img.OperatingSystem()) {
logrus.Errorf("not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem())
logrus.Errorf("Not restoring image with unsupported operating system %v, %v, %s", dgst, chainID, img.OperatingSystem())
return nil
}
l, err = is.lss[img.OperatingSystem()].Get(chainID)
if err != nil {
if err == layer.ErrLayerDoesNotExist {
logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem())
logrus.Errorf("Layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem())
return nil
}
return err
@ -244,7 +244,7 @@ func (is *store) Delete(id ID) ([]layer.Metadata, error) {
}
if err := is.digestSet.Remove(id.Digest()); err != nil {
logrus.Errorf("error removing %s from digest set: %q", id, err)
logrus.Errorf("Error removing %s from digest set: %q", id, err)
}
delete(is.images, id)
is.fs.Delete(id.Digest())
@ -330,7 +330,7 @@ func (is *store) imagesMap(all bool) map[ID]*Image {
}
img, err := is.Get(id)
if err != nil {
logrus.Errorf("invalid image access: %q, error: %q", id, err)
logrus.Errorf("Invalid image access: %q, error: %q", id, err)
continue
}
images[id] = img

View File

@ -339,7 +339,7 @@ func (fms *fileMetadataStore) getOrphan() ([]roLayer, error) {
}
cacheID := strings.TrimSpace(string(contentBytes))
if cacheID == "" {
logrus.Error("invalid cache ID")
logrus.Error("Invalid cache ID")
continue
}

View File

@ -764,9 +764,9 @@ func (ls *layerStore) Cleanup() error {
if err != nil {
logrus.Errorf("Cannot get orphan layers: %v", err)
}
logrus.Debugf("found %v orphan layers", len(orphanLayers))
logrus.Debugf("Found %v orphan layers", len(orphanLayers))
for _, orphan := range orphanLayers {
logrus.Debugf("removing orphan layer, chain ID: %v , cache ID: %v", orphan.chainID, orphan.cacheID)
logrus.Debugf("Removing orphan layer, chain ID: %v , cache ID: %v", orphan.chainID, orphan.cacheID)
err = ls.driver.Remove(orphan.cacheID)
if err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("cache-id", orphan.cacheID).Error("cannot remove orphan layer")

View File

@ -165,7 +165,7 @@ func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
unpigzPath, err := exec.LookPath("unpigz")
if err != nil {
logrus.Debugf("unpigz binary not found, falling back to go gzip library")
logrus.Debugf("Unpigz binary not found, falling back to go gzip library")
return gzip.NewReader(buf)
}

View File

@ -438,7 +438,7 @@ func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMa
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
logrus.Debugf("Failed close Changes writer: %s", err)
}
}()
return reader, nil

View File

@ -108,7 +108,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, er
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
logrus.Debugf("Copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}

View File

@ -159,7 +159,7 @@ func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}
select {
case <-timer.C:
case <-chClose:
logrus.Debugf("watch for %s closed", f.Name())
logrus.Debugf("Watch for %s closed", f.Name())
return
}

View File

@ -1,3 +1,4 @@
//go:build linux || freebsd || openbsd
// +build linux freebsd openbsd
// Package kernel provides helper function to get, parse and compare kernel
@ -25,7 +26,7 @@ func GetKernelVersion() (*VersionInfo, error) {
// the given version.
func CheckKernelVersion(k, major, minor int) bool {
if v, err := GetKernelVersion(); err != nil {
logrus.Warnf("error getting kernel version: %s", err)
logrus.Warnf("Error getting kernel version: %s", err)
} else {
if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 {
return false

View File

@ -291,7 +291,7 @@ func (hooks *Hooks) MarshalJSON() ([]byte, error) {
case CommandHook:
serializableHooks = append(serializableHooks, chook)
default:
logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
logrus.Warnf("Cannot serialize hook of type %T, skipping", hook)
}
}