Rebased on master

Priya Wadhwa 2018-08-27 14:18:24 -07:00
commit 935d322f1d
18 changed files with 307 additions and 163 deletions

Gopkg.lock (generated)
View File

@@ -973,6 +973,6 @@
 [solve-meta]
 analyzer-name = "dep"
 analyzer-version = 1
-inputs-digest = "e2edf813355250ce0afb670aa8ed7096b44d93edaa813598a2717417bfd30d5a"
+inputs-digest = "cb0a41d18a5ad50aed7a41a7de5d4cf9d6dd43d473fef064cd891dbe3677c3e1"
 solver-name = "gps-cdcl"
 solver-version = 1

Gopkg.toml
View File

@@ -11,10 +11,6 @@ required = [
 name = "github.com/prometheus/client_golang"
 revision = "a40133b69fbd73ee655606a9bf5f8b9b9bf758dd"
-[[override]]
-name = "github.com/containers/storage"
-revision = "1e5ce40cdb84ab66e26186435b1273e04b879fef"
 [[override]]
 name = "github.com/opencontainers/runc"
 revision = "4fc53a81fb7c994640722ac585fa9ca548971871"
@@ -31,10 +27,6 @@ required = [
 name = "google.golang.org/grpc"
 revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0"
-[[constraint]]
-name = "github.com/genuinetools/amicontained"
-version = "0.4.0"
 [[constraint]]
 name = "github.com/aws/aws-sdk-go"
 version = "1.13.56"
@@ -46,11 +38,3 @@ required = [
 [[override]]
 name = "k8s.io/apimachinery"
 version = "kubernetes-1.11.0"
-[[override]]
-name = "github.com/tonistiigi/fsutil"
-branch = "master"
-[[constraint]]
-name = "github.com/docker/docker"
-revision = "71cd53e4a197b303c6ba086bd584ffd67a884281"

README.md
View File

@@ -29,6 +29,7 @@ We do **not** recommend running the kaniko executor binary in another image, as
 - [Security](#security)
 - [Comparison with Other Tools](#comparison-with-other-tools)
 - [Community](#community)
+- [Limitations](#limitations)

 _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md)._
@@ -256,7 +257,8 @@ To configure credentials, you will need to do the following:
 #### --snapshotMode

 You can set the `--snapshotMode=<full (default), time>` flag to set how kaniko will snapshot the filesystem.
-If `--snapshotMode=time` is set, only file mtime will be considered when snapshotting.
+If `--snapshotMode=time` is set, only file mtime will be considered when snapshotting (see
+[limitations related to mtime](#mtime-and-snapshotting)).

 #### --build-arg
@@ -356,3 +358,23 @@ provides.
 [kaniko-users](https://groups.google.com/forum/#!forum/kaniko-users) Google group

 To Contribute to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Limitations
+
+### mtime and snapshotting
+
+When taking a snapshot, kaniko's hashing algorithms include (or in the case of
+[`--snapshotMode=time`](#--snapshotmode), only use) a file's
+[`mtime`](https://en.wikipedia.org/wiki/Inode#POSIX_inode_description) to determine
+if the file has changed. Unfortunately there is a delay between when changes to a
+file are made and when the `mtime` is updated. This means:
+
+* With the time-only snapshot mode (`--snapshotMode=time`), kaniko may miss changes
+introduced by `RUN` commands entirely.
+* With the default snapshot mode (`--snapshotMode=full`), whether or not kaniko will
+add a layer in the case where a `RUN` command modifies a file **but the contents do
+not** change is theoretically non-deterministic. This _does not affect the contents_
+which will still be correct, but it does affect the number of layers.
+
+_Note that these issues are currently theoretical only. If you see this issue occur, please
+[open an issue](https://github.com/GoogleContainerTools/kaniko/issues)._
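For reference, a typical executor invocation that opts into the faster, mtime-only snapshotting described above might look like the line below; only `--snapshotMode` comes from this commit, and the dockerfile, context, and destination values are placeholders:

    /kaniko/executor --dockerfile=Dockerfile --context=/workspace --destination=gcr.io/my-project/image:1.0 --snapshotMode=time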

View File

@@ -0,0 +1,49 @@
+FROM alpine@sha256:5ce5f501c457015c4b91f91a15ac69157d9b06f1a75cf9107bf2b62e0843983a
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo
+COPY context/foo /foo

View File

@@ -135,11 +135,9 @@ func TestMain(m *testing.M) {
 	defer DeleteFromBucket(fileInBucket)
 	fmt.Println("Building kaniko image")
-	buildKaniko := exec.Command("docker", "build", "-t", ExecutorImage, "-f", "../deploy/Dockerfile", "..")
-	err = buildKaniko.Run()
-	if err != nil {
-		fmt.Print(err)
-		fmt.Print("Building kaniko failed.")
+	cmd := exec.Command("docker", "build", "-t", ExecutorImage, "-f", "../deploy/Dockerfile", "..")
+	if _, err = RunCommandWithoutTest(cmd); err != nil {
+		fmt.Printf("Building kaniko failed: %s", err)
 		os.Exit(1)
 	}

View File

@@ -79,16 +79,12 @@ func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
 		a.snapshotFiles = append(a.snapshotFiles, urlDest)
 	} else if util.IsFileLocalTarArchive(fullPath) {
 		logrus.Infof("Unpacking local tar archive %s to %s", src, dest)
-		if err := util.UnpackLocalTarArchive(fullPath, dest); err != nil {
-			return err
-		}
-		// Add the unpacked files to the snapshotter
-		filesAdded, err := util.Files(dest)
+		extractedFiles, err := util.UnpackLocalTarArchive(fullPath, dest)
 		if err != nil {
 			return err
 		}
-		logrus.Debugf("Added %v from local tar archive %s", filesAdded, src)
-		a.snapshotFiles = append(a.snapshotFiles, filesAdded...)
+		logrus.Debugf("Added %v from local tar archive %s", extractedFiles, src)
+		a.snapshotFiles = append(a.snapshotFiles, extractedFiles...)
 	} else {
 		unresolvedSrcs = append(unresolvedSrcs, src)
 	}

View File

@@ -79,10 +79,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu
 			// we need to add '/' to the end to indicate the destination is a directory
 			dest = filepath.Join(cwd, dest) + "/"
 		}
-		if err := util.CopyDir(fullPath, dest); err != nil {
-			return err
-		}
-		copiedFiles, err := util.Files(dest)
+		copiedFiles, err := util.CopyDir(fullPath, dest)
 		if err != nil {
 			return err
 		}

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package commands

 import (
+	"fmt"
 	"os"
 	"strings"
@@ -53,13 +54,14 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
 			return err
 		}
-		logrus.Infof("Creating directory %s", volume)
-		if err := os.MkdirAll(volume, 0755); err != nil {
-			return err
+		// Only create and snapshot the dir if it didn't exist already
+		if _, err := os.Stat(volume); os.IsNotExist(err) {
+			logrus.Infof("Creating directory %s", volume)
+			v.snapshotFiles = []string{volume}
+			if err := os.MkdirAll(volume, 0755); err != nil {
+				return fmt.Errorf("Could not create directory for volume %s: %s", volume, err)
+			}
 		}
-		//Check if directory already exists?
-		v.snapshotFiles = append(v.snapshotFiles, volume)
 	}

 	config.Volumes = existingVolumes

View File

@@ -47,8 +47,14 @@ func (w *WorkdirCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile
 		config.WorkingDir = filepath.Join(config.WorkingDir, resolvedWorkingDir)
 	}
 	logrus.Infof("Changed working directory to %s", config.WorkingDir)
-	w.snapshotFiles = []string{config.WorkingDir}
-	return os.MkdirAll(config.WorkingDir, 0755)
+	// Only create and snapshot the dir if it didn't exist already
+	if _, err := os.Stat(config.WorkingDir); os.IsNotExist(err) {
+		logrus.Infof("Creating directory %s", config.WorkingDir)
+		w.snapshotFiles = []string{config.WorkingDir}
+		return os.MkdirAll(config.WorkingDir, 0755)
+	}
+	return nil
 }

 // FilesToSnapshot returns the workingdir, which should have been created if it didn't already exist

View File

@@ -85,23 +85,41 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 			if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil {
 				return nil, err
 			}
+			// Don't snapshot if it's not the final stage and not the final command
+			// Also don't snapshot if it's the final stage, not the final command, and single snapshot is set
+			if (!stage.FinalStage && !finalCmd) || (stage.FinalStage && !finalCmd && opts.SingleSnapshot) {
+				continue
+			}
+			// Now, we get the files to snapshot from this command and take the snapshot
 			snapshotFiles := dockerCommand.FilesToSnapshot()
-			if finalCmd {
-				snapshotFiles = nil
+			var contents []byte
+			// If this is an intermediate stage, we only snapshot for the last command and we
+			// want to snapshot the entire filesystem since we aren't tracking what was changed
+			// by previous commands.
+			if !stage.FinalStage {
+				if finalCmd {
+					contents, err = snapshotter.TakeSnapshotFS()
+				}
+			} else {
+				// If we are in single snapshot mode, we only take a snapshot once, after all
+				// commands have completed.
+				if opts.SingleSnapshot {
+					if finalCmd {
+						contents, err = snapshotter.TakeSnapshotFS()
+					}
+				} else {
+					// Otherwise, in the final stage we take a snapshot at each command. If we know
+					// the files that were changed, we'll snapshot those explicitly, otherwise we'll
+					// check if anything in the filesystem changed.
+					if snapshotFiles != nil {
+						contents, err = snapshotter.TakeSnapshot(snapshotFiles)
+					} else {
+						contents, err = snapshotter.TakeSnapshotFS()
+					}
+				}
 			}
-			contents, err := snapshotter.TakeSnapshot(snapshotFiles)
 			if err != nil {
-				return nil, err
+				return nil, fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
 			}
 			util.MoveVolumeWhitelistToWhitelist()
 			if contents == nil {
-				logrus.Info("No files were changed, appending empty layer to config.")
+				logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
 				continue
 			}
 			// Append the layer to the image
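Flattened out, the snapshot decision above reduces to roughly the sketch below. This helper is not part of the commit; it only restates the branching, and it assumes the snapshot package's usual import path.

package sketch

import (
	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
)

// takeSnapshot mirrors the logic in DoBuild: decide whether this command
// produces a layer at all and, if so, whether to snapshot specific files or
// the whole filesystem.
func takeSnapshot(s *snapshot.Snapshotter, finalStage, finalCmd, singleSnapshot bool, files []string) ([]byte, error) {
	switch {
	case !finalStage || singleSnapshot:
		// Intermediate stages and single-snapshot mode snapshot only once,
		// after the final command, and always scan the entire filesystem.
		if !finalCmd {
			return nil, nil // no layer for this command
		}
		return s.TakeSnapshotFS()
	case files != nil:
		// Final stage, and the command reported exactly which files changed.
		return s.TakeSnapshot(files)
	default:
		// Final stage, changed files unknown: diff the whole filesystem.
		return s.TakeSnapshotFS()
	}
}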

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package snapshot

 import (
+	"fmt"
 	"path/filepath"
 	"strings"
 )
@@ -82,6 +83,20 @@ func (l *LayeredMap) MaybeAddWhiteout(s string) (bool, error) {
 	return true, nil
 }

+// Add will add the specified file s to the layered map.
+func (l *LayeredMap) Add(s string) error {
+	newV, err := l.hasher(s)
+	if err != nil {
+		return fmt.Errorf("Error creating hash for %s: %s", s, err)
+	}
+	l.layers[len(l.layers)-1][s] = newV
+	return nil
+}
+
+// MaybeAdd will add the specified file s to the layered map if
+// the layered map's hashing function determines it has changed. If
+// it has not changed, it will not be added. Returns true if the file
+// was added.
 func (l *LayeredMap) MaybeAdd(s string) (bool, error) {
 	oldV, ok := l.Get(s)
 	newV, err := l.hasher(s)
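Add and MaybeAdd give the snapshotter two ways to record a path, and the snapshot.go change below relies on the distinction: parent directories are only re-recorded when their hash changes, while files a command explicitly reports as changed are always recorded. A small illustrative sketch (recordChange is not kaniko code, and the import path is assumed):

package sketch

import (
	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
)

// recordChange records a changed file and its parent directory in the current
// layer of a LayeredMap.
func recordChange(l *snapshot.LayeredMap, parentDir, file string) error {
	// The parent directory goes through the hash check: it is only added to
	// the layer if MaybeAdd decides it changed.
	if _, err := l.MaybeAdd(parentDir); err != nil {
		return err
	}
	// The file itself is always recorded, even if an mtime-based hash happens
	// to look unchanged.
	return l.Add(file)
}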

View File

@@ -17,12 +17,13 @@ limitations under the License.
 package snapshot

 import (
-	"archive/tar"
 	"bytes"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"syscall"

 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
@@ -33,7 +34,6 @@ import (
 type Snapshotter struct {
 	l         *LayeredMap
 	directory string
-	hardlinks map[uint64]string
 }

 // NewSnapshotter creates a new snapshotter rooted at d
@@ -49,17 +49,11 @@ func (s *Snapshotter) Init() error {
 	return nil
 }

-// TakeSnapshot takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
+// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
 // a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
 func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) {
 	buf := bytes.NewBuffer([]byte{})
-	var filesAdded bool
-	var err error
-	if files == nil {
-		filesAdded, err = s.snapShotFS(buf)
-	} else {
-		filesAdded, err = s.snapshotFiles(buf, files)
-	}
+	filesAdded, err := s.snapshotFiles(buf, files)
 	if err != nil {
 		return nil, err
 	}
@@ -70,10 +64,39 @@ func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) {
 	return contents, err
 }

-// snapshotFiles takes a snapshot of specific files
-// Used for ADD/COPY commands, when we know which files have changed
+// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
+// a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
+func (s *Snapshotter) TakeSnapshotFS() ([]byte, error) {
+	buf := bytes.NewBuffer([]byte{})
+	filesAdded, err := s.snapShotFS(buf)
+	if err != nil {
+		return nil, err
+	}
+	contents := buf.Bytes()
+	if !filesAdded {
+		return nil, nil
+	}
+	return contents, err
+}
+
+func shouldSnapshot(file string, snapshottedFiles map[string]bool) (bool, error) {
+	if val, ok := snapshottedFiles[file]; ok && val {
+		return false, nil
+	}
+	whitelisted, err := util.CheckWhitelist(file)
+	if err != nil {
+		return false, fmt.Errorf("Error checking for %s in whitelist: %s", file, err)
+	}
+	if whitelisted && !isBuildFile(file) {
+		logrus.Infof("Not adding %s to layer, as it's whitelisted", file)
+		return false, nil
+	}
+	return true, nil
+}
+
+// snapshotFiles creates a snapshot (tar) and adds the specified files.
+// It will not add files which are whitelisted.
 func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
-	s.hardlinks = map[uint64]string{}
 	s.l.Snapshot()
 	if len(files) == 0 {
 		logrus.Info("No files changed in this command, skipping snapshotting.")
@@ -81,45 +104,61 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
 	}
 	logrus.Infof("Taking snapshot of files %v...", files)
 	snapshottedFiles := make(map[string]bool)
-	for _, file := range files {
-		parentDirs := util.ParentDirectories(file)
-		files = append(parentDirs, files...)
-	}
 	filesAdded := false
-	w := tar.NewWriter(f)
-	defer w.Close()
-	// Now create the tar.
+
+	t := util.NewTar(f)
+	defer t.Close()
+
+	// First add to the tar any parent directories that haven't been added
+	parentDirs := []string{}
 	for _, file := range files {
+		parents := util.ParentDirectories(file)
+		parentDirs = append(parentDirs, parents...)
+	}
+	for _, file := range parentDirs {
 		file = filepath.Clean(file)
-		if val, ok := snapshottedFiles[file]; ok && val {
-			continue
-		}
-		whitelisted, err := util.CheckWhitelist(file)
+		shouldSnapshot, err := shouldSnapshot(file, snapshottedFiles)
 		if err != nil {
-			return false, err
+			return false, fmt.Errorf("Error checking if parent dir %s can be snapshotted: %s", file, err)
 		}
-		if whitelisted && !isBuildFile(file) {
-			logrus.Infof("Not adding %s to layer, as it's whitelisted", file)
+		if !shouldSnapshot {
 			continue
 		}
 		snapshottedFiles[file] = true
-		info, err := os.Lstat(file)
+
+		fileAdded, err := s.l.MaybeAdd(file)
 		if err != nil {
-			return false, err
+			return false, fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err)
 		}
-		// Only add to the tar if we add it to the layeredmap.
-		addFile, err := s.l.MaybeAdd(file)
-		if err != nil {
-			return false, err
-		}
-		if addFile {
-			filesAdded = true
-			if err := util.AddToTar(file, info, s.hardlinks, w); err != nil {
-				return false, err
+		if fileAdded {
+			err = t.AddFileToTar(file)
+			if err != nil {
+				return false, fmt.Errorf("Error adding parent dir %s to tar: %s", file, err)
 			}
+			filesAdded = true
 		}
 	}
+	// Next add the files themselves to the tar
+	for _, file := range files {
+		file = filepath.Clean(file)
+		shouldSnapshot, err := shouldSnapshot(file, snapshottedFiles)
+		if err != nil {
+			return false, fmt.Errorf("Error checking if file %s can be snapshotted: %s", file, err)
+		}
+		if !shouldSnapshot {
+			continue
+		}
+		snapshottedFiles[file] = true
+		if err = s.l.Add(file); err != nil {
+			return false, fmt.Errorf("Unable to add file %s to layered map: %s", file, err)
+		}
+		if err = t.AddFileToTar(file); err != nil {
+			return false, fmt.Errorf("Error adding file %s to tar: %s", file, err)
		}
+		filesAdded = true
+	}
 	return filesAdded, nil
 }
@@ -132,14 +171,22 @@ func isBuildFile(file string) bool {
 	return false
 }

+// shapShotFS creates a snapshot (tar) of all files in the system which are not
+// whitelisted and which have changed.
 func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
 	logrus.Info("Taking snapshot of full filesystem...")
-	s.hardlinks = map[uint64]string{}
+
+	// Some of the operations that follow (e.g. hashing) depend on the file system being synced,
+	// for example the hashing function that determines if files are equal uses the mtime of the files,
+	// which can lag if sync is not called. Unfortunately there can still be lag if too much data needs
+	// to be flushed or the disk does its own caching/buffering.
+	syscall.Sync()
 	s.l.Snapshot()
 	existingPaths := s.l.GetFlattenedPathsForWhiteOut()
 	filesAdded := false
-	w := tar.NewWriter(f)
-	defer w.Close()
+	t := util.NewTar(f)
+	defer t.Close()

 	// Save the fs state in a map to iterate over later.
 	memFs := map[string]os.FileInfo{}
@@ -163,7 +210,7 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
 		if addWhiteout {
 			logrus.Infof("Adding whiteout for %s", path)
 			filesAdded = true
-			if err := util.Whiteout(path, w); err != nil {
+			if err := t.Whiteout(path); err != nil {
 				return false, err
 			}
 		}
@@ -171,7 +218,7 @@
 	}

 	// Now create the tar.
-	for path, info := range memFs {
+	for path := range memFs {
 		whitelisted, err := util.CheckWhitelist(path)
 		if err != nil {
 			return false, err
@@ -189,7 +236,7 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
 		if maybeAdd {
 			logrus.Debugf("Adding %s to layer, because it was changed.", path)
 			filesAdded = true
-			if err := util.AddToTar(path, info, s.hardlinks, w); err != nil {
+			if err := t.AddFileToTar(path); err != nil {
 				return false, err
 			}
 		}
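A caller of the new full-filesystem entry point gets back either a tarball of the changed files or nil when nothing changed. As a sketch only (not commit code; the output path is a placeholder and the import path is assumed):

package sketch

import (
	"io/ioutil"

	"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
)

// writeLayerIfChanged takes a full-filesystem snapshot and, if anything
// changed, writes the returned tarball to disk.
func writeLayerIfChanged(s *snapshot.Snapshotter) error {
	contents, err := s.TakeSnapshotFS()
	if err != nil {
		return err
	}
	if contents == nil {
		// Nothing changed since the last snapshot, so there is no layer to write.
		return nil
	}
	return ioutil.WriteFile("/tmp/layer.tar", contents, 0644)
}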

View File

@@ -29,7 +29,7 @@
 	"github.com/pkg/errors"
 )

-func TestSnapshotFileChange(t *testing.T) {
+func TestSnapshotFSFileChange(t *testing.T) {
 	testDir, snapshotter, err := setUpTestDir()
 	defer os.RemoveAll(testDir)
@@ -45,7 +45,7 @@ func TestSnapshotFileChange(t *testing.T) {
 		t.Fatalf("Error setting up fs: %s", err)
 	}
 	// Take another snapshot
-	contents, err := snapshotter.TakeSnapshot(nil)
+	contents, err := snapshotter.TakeSnapshotFS()
 	if err != nil {
 		t.Fatalf("Error taking snapshot of fs: %s", err)
 	}
@@ -81,7 +81,7 @@ func TestSnapshotFileChange(t *testing.T) {
 	}
 }

-func TestSnapshotChangePermissions(t *testing.T) {
+func TestSnapshotFSChangePermissions(t *testing.T) {
 	testDir, snapshotter, err := setUpTestDir()
 	defer os.RemoveAll(testDir)
 	if err != nil {
@@ -93,7 +93,7 @@ func TestSnapshotChangePermissions(t *testing.T) {
 		t.Fatalf("Error changing permissions on %s: %v", batPath, err)
 	}
 	// Take another snapshot
-	contents, err := snapshotter.TakeSnapshot(nil)
+	contents, err := snapshotter.TakeSnapshotFS()
 	if err != nil {
 		t.Fatalf("Error taking snapshot of fs: %s", err)
 	}
@@ -141,7 +141,6 @@ func TestSnapshotFiles(t *testing.T) {
 	}
 	filesToSnapshot := []string{
 		filepath.Join(testDir, "foo"),
-		filepath.Join(testDir, "kaniko/file"),
 	}
 	contents, err := snapshotter.TakeSnapshot(filesToSnapshot)
 	if err != nil {
@@ -166,14 +165,14 @@ func TestSnapshotFiles(t *testing.T) {
 	testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles)
 }

-func TestEmptySnapshot(t *testing.T) {
+func TestEmptySnapshotFS(t *testing.T) {
 	testDir, snapshotter, err := setUpTestDir()
 	defer os.RemoveAll(testDir)
 	if err != nil {
 		t.Fatal(err)
 	}
 	// Take snapshot with no changes
-	contents, err := snapshotter.TakeSnapshot(nil)
+	contents, err := snapshotter.TakeSnapshotFS()
 	if err != nil {
 		t.Fatalf("Error taking snapshot of fs: %s", err)
 	}

View File

@@ -155,7 +155,9 @@ func ChildDirInWhitelist(path, directory string) bool {
 	return false
 }

-func unTar(r io.Reader, dest string) error {
+// unTar returns a list of files that have been extracted from the tar archive at r to the path at dest
+func unTar(r io.Reader, dest string) ([]string, error) {
+	var extractedFiles []string
 	tr := tar.NewReader(r)
 	for {
 		hdr, err := tr.Next()
@@ -163,13 +165,14 @@ func unTar(r io.Reader, dest string) error {
 			break
 		}
 		if err != nil {
-			return err
+			return nil, err
 		}
 		if err := extractFile(dest, hdr, tr); err != nil {
-			return err
+			return nil, err
 		}
+		extractedFiles = append(extractedFiles, dest)
 	}
-	return nil
+	return extractedFiles, nil
 }

 func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
@@ -349,24 +352,6 @@ func RelativeFiles(fp string, root string) ([]string, error) {
 	return files, err
 }

-// Files returns a list of all files rooted at root
-func Files(root string) ([]string, error) {
-	var files []string
-	logrus.Debugf("Getting files and contents at root %s", root)
-	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
-		whitelisted, err := CheckWhitelist(path)
-		if err != nil {
-			return err
-		}
-		if whitelisted {
-			return nil
-		}
-		files = append(files, path)
-		return err
-	})
-	return files, err
-}
-
 // ParentDirectories returns a list of paths to all parent directories
 // Ex. /some/temp/dir -> [/, /some, /some/temp, /some/temp/dir]
 func ParentDirectories(path string) []string {
@@ -459,16 +444,18 @@ func DownloadFileToDest(rawurl, dest string) error {
 }

 // CopyDir copies the file or directory at src to dest
-func CopyDir(src, dest string) error {
+// It returns a list of files it copied over
+func CopyDir(src, dest string) ([]string, error) {
 	files, err := RelativeFiles("", src)
 	if err != nil {
-		return err
+		return nil, err
 	}
+	var copiedFiles []string
 	for _, file := range files {
 		fullPath := filepath.Join(src, file)
 		fi, err := os.Lstat(fullPath)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		destPath := filepath.Join(dest, file)
 		if fi.IsDir() {
@@ -478,24 +465,25 @@ func CopyDir(src, dest string) error {
 			gid := int(fi.Sys().(*syscall.Stat_t).Gid)
 			if err := os.MkdirAll(destPath, fi.Mode()); err != nil {
-				return err
+				return nil, err
 			}
 			if err := os.Chown(destPath, uid, gid); err != nil {
-				return err
+				return nil, err
 			}
 		} else if fi.Mode()&os.ModeSymlink != 0 {
 			// If file is a symlink, we want to create the same relative symlink
 			if err := CopySymlink(fullPath, destPath); err != nil {
-				return err
+				return nil, err
 			}
 		} else {
 			// ... Else, we want to copy over a file
 			if err := CopyFile(fullPath, destPath); err != nil {
-				return err
+				return nil, err
 			}
 		}
+		copiedFiles = append(copiedFiles, destPath)
 	}
-	return nil
+	return copiedFiles, nil
 }

 // CopySymlink copies the symlink at src to dest
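With this change a caller gets the copied paths back directly instead of re-walking the destination with the removed Files helper, which is exactly what the copy.go change above does. A minimal sketch (not commit code; the import path is assumed):

package sketch

import (
	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

// copyAndTrack copies src into dest and returns the snapshot list extended
// with every file that was actually written.
func copyAndTrack(snapshotFiles []string, src, dest string) ([]string, error) {
	copiedFiles, err := util.CopyDir(src, dest)
	if err != nil {
		return nil, err
	}
	return append(snapshotFiles, copiedFiles...), nil
}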

View File

@@ -20,6 +20,7 @@ import (
 	"archive/tar"
 	"compress/bzip2"
 	"compress/gzip"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -31,8 +32,32 @@ import (
 	"github.com/sirupsen/logrus"
 )

-// AddToTar adds the file i to tar w at path p
-func AddToTar(p string, i os.FileInfo, hardlinks map[uint64]string, w *tar.Writer) error {
+// Tar knows how to write files to a tar file.
+type Tar struct {
+	hardlinks map[uint64]string
+	w         *tar.Writer
+}
+
+// NewTar will create an instance of Tar that can write files to the writer at f.
+func NewTar(f io.Writer) Tar {
+	w := tar.NewWriter(f)
+	return Tar{
+		w:         w,
+		hardlinks: map[uint64]string{},
+	}
+}
+
+// Close will close any open streams used by Tar.
+func (t *Tar) Close() {
+	t.w.Close()
+}
+
+// AddFileToTar adds the file at path p to the tar
+func (t *Tar) AddFileToTar(p string) error {
+	i, err := os.Lstat(p)
+	if err != nil {
+		return fmt.Errorf("Failed to get file info for %s: %s", p, err)
+	}
 	linkDst := ""
 	if i.Mode()&os.ModeSymlink != 0 {
 		var err error
@@ -51,13 +76,13 @@ func AddToTar(p string, i os.FileInfo, hardlinks map[uint64]string, w *tar.Write
 	}
 	hdr.Name = p

-	hardlink, linkDst := checkHardlink(p, hardlinks, i)
+	hardlink, linkDst := t.checkHardlink(p, i)
 	if hardlink {
 		hdr.Linkname = linkDst
 		hdr.Typeflag = tar.TypeLink
 		hdr.Size = 0
 	}
-	if err := w.WriteHeader(hdr); err != nil {
+	if err := t.w.WriteHeader(hdr); err != nil {
 		return err
 	}
 	if !(i.Mode().IsRegular()) || hardlink {
@@ -68,13 +93,13 @@ func AddToTar(p string, i os.FileInfo, hardlinks map[uint64]string, w *tar.Write
 		return err
 	}
 	defer r.Close()
-	if _, err := io.Copy(w, r); err != nil {
+	if _, err := io.Copy(t.w, r); err != nil {
 		return err
 	}
 	return nil
 }

-func Whiteout(p string, w *tar.Writer) error {
+func (t *Tar) Whiteout(p string) error {
 	dir := filepath.Dir(p)
 	name := ".wh." + filepath.Base(p)
@@ -82,7 +107,7 @@ func Whiteout(p string, w *tar.Writer) error {
 		Name: filepath.Join(dir, name),
 		Size: 0,
 	}
-	if err := w.WriteHeader(th); err != nil {
+	if err := t.w.WriteHeader(th); err != nil {
 		return err
 	}
@@ -90,7 +115,7 @@ func Whiteout(p string, w *tar.Writer) error {
 }

 // Returns true if path is hardlink, and the link destination
-func checkHardlink(p string, hardlinks map[uint64]string, i os.FileInfo) (bool, string) {
+func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) {
 	hardlink := false
 	linkDst := ""
 	if sys := i.Sys(); sys != nil {
@@ -98,12 +123,12 @@ func checkHardlink(p string, hardlinks map[uint64]string, i os.FileInfo) (bool,
 			nlinks := stat.Nlink
 			if nlinks > 1 {
 				inode := stat.Ino
-				if original, exists := hardlinks[inode]; exists && original != p {
+				if original, exists := t.hardlinks[inode]; exists && original != p {
 					hardlink = true
 					logrus.Debugf("%s inode exists in hardlinks map, linking to %s", p, original)
 					linkDst = original
 				} else {
-					hardlinks[inode] = p
+					t.hardlinks[inode] = p
 				}
 			}
 		}
@@ -112,17 +137,17 @@ func checkHardlink(p string, hardlinks map[uint64]string, i os.FileInfo) (bool,
 }

 // UnpackLocalTarArchive unpacks the tar archive at path to the directory dest
-// Returns true if the path was actually unpacked
-func UnpackLocalTarArchive(path, dest string) error {
+// Returns the files extracted from the tar archive
+func UnpackLocalTarArchive(path, dest string) ([]string, error) {
 	// First, we need to check if the path is a local tar archive
 	if compressed, compressionLevel := fileIsCompressedTar(path); compressed {
 		file, err := os.Open(path)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		defer file.Close()
 		if compressionLevel == archive.Gzip {
-			return UnpackCompressedTar(path, dest)
+			return nil, UnpackCompressedTar(path, dest)
 		} else if compressionLevel == archive.Bzip2 {
 			bzr := bzip2.NewReader(file)
 			return unTar(bzr, dest)
@@ -131,12 +156,12 @@
 	if fileIsUncompressedTar(path) {
 		file, err := os.Open(path)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		defer file.Close()
 		return unTar(file, dest)
 	}
-	return errors.New("path does not lead to local tar archive")
+	return nil, errors.New("path does not lead to local tar archive")
 }

 //IsFileLocalTarArchive returns true if the file is a local tar archive
@@ -198,5 +223,6 @@ func UnpackCompressedTar(path, dir string) error {
 		return err
 	}
 	defer gzr.Close()
-	return unTar(gzr, dir)
+	_, err = unTar(gzr, dir)
+	return err
 }
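A usage sketch for the new wrapper (not part of the commit; the file paths are placeholders): the hardlink bookkeeping that callers previously had to thread through AddToTar now lives inside Tar.

package sketch

import (
	"os"

	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

// writeLayer writes two entries into a layer tarball: a regular file and a
// whiteout marking a deleted path.
func writeLayer() error {
	f, err := os.Create("/tmp/layer.tar")
	if err != nil {
		return err
	}
	defer f.Close()

	t := util.NewTar(f)
	defer t.Close()

	if err := t.AddFileToTar("/etc/hostname"); err != nil {
		return err
	}
	// Whiteout emits a .wh.<name> entry so the path shows up as deleted in the layer.
	return t.Whiteout("/var/cache/removed-dir")
}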

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package util

 import (
-	"archive/tar"
 	"compress/gzip"
 	"io"
 	"io/ioutil"
@@ -92,16 +91,11 @@ func setUpFilesAndTars(testDir string) error {
 }

 func createTar(testdir string, writer io.Writer) error {
-
-	w := tar.NewWriter(writer)
-	defer w.Close()
+	t := NewTar(writer)
+	defer t.Close()

 	for _, regFile := range regularFiles {
 		filePath := filepath.Join(testdir, regFile)
-		fi, err := os.Stat(filePath)
-		if err != nil {
-			return err
-		}
-		if err := AddToTar(filePath, fi, map[uint64]string{}, w); err != nil {
+		if err := t.AddFileToTar(filePath); err != nil {
 			return err
 		}
 	}

View File

@@ -19,12 +19,13 @@ package util
 import (
 	"crypto/md5"
 	"encoding/hex"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"io"
 	"os"
 	"strconv"
 	"syscall"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )

 // SetLogLevel sets the logrus logging level
@@ -68,7 +69,8 @@ func Hasher() func(string) (string, error) {
 	return hasher
 }

-// MtimeHasher returns a hash function, which only looks at mtime to determine if a file has changed
+// MtimeHasher returns a hash function, which only looks at mtime to determine if a file has changed.
+// Note that the mtime can lag, so it's possible that a file will have changed but the mtime may look the same.
 func MtimeHasher() func(string) (string, error) {
 	hasher := func(p string) (string, error) {
 		h := md5.New()
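For context, an mtime-only hasher in the spirit of MtimeHasher looks roughly like the sketch below (an illustration, not kaniko's exact implementation). Because mtime can lag behind the actual write, two hashes taken around a very recent change may come out identical, which is the limitation the new comment calls out.

package sketch

import (
	"crypto/md5"
	"encoding/hex"
	"os"
	"strconv"
)

// mtimeHash hashes only a file's modification time.
func mtimeHash(p string) (string, error) {
	h := md5.New()
	fi, err := os.Lstat(p)
	if err != nil {
		return "", err
	}
	h.Write([]byte(strconv.FormatInt(fi.ModTime().UnixNano(), 10)))
	return hex.EncodeToString(h.Sum(nil)), nil
}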

View File

@@ -40,6 +40,7 @@ func SetupFiles(path string, files map[string]string) error {
 }

 func CheckErrorAndDeepEqual(t *testing.T, shouldErr bool, err error, expected, actual interface{}) {
+	t.Helper()
 	if err := checkErr(shouldErr, err); err != nil {
 		t.Error(err)
 		return