This commit is contained in:
Kartik Verma 2018-11-14 12:39:01 +05:30
commit b4be847fd2
No known key found for this signature in database
GPG Key ID: 6ED342ADBA03D74B
26 changed files with 366 additions and 184 deletions

View File

@ -1,3 +1,28 @@
# v0.6.0 Release - 11/06/2018
## New Features
* Parse ARG commands at the top of Dockerfiles [#404](https://github.com/GoogleContainerTools/kaniko/pull/404)
* Add buffering for large layers [#428](https://github.com/GoogleContainerTools/kaniko/pull/428)
* Separate Insecure Pull Options [#409](https://github.com/GoogleContainerTools/kaniko/pull/409)
* Add support for .dockerignore file [#394](https://github.com/GoogleContainerTools/kaniko/pull/394)
* Support insecure pull [#401](https://github.com/GoogleContainerTools/kaniko/pull/401)
## Updates
* Preserve options when doing a cache push [#423](https://github.com/GoogleContainerTools/kaniko/pull/423)
* More cache cleanups [#397](https://github.com/GoogleContainerTools/kaniko/pull/397)
* Add documentation for base image caching [#421](https://github.com/GoogleContainerTools/kaniko/pull/421)
* Update go-containerregistry [#420](https://github.com/GoogleContainerTools/kaniko/pull/420)
* Update README [#419](https://github.com/GoogleContainerTools/kaniko/pull/419)
* Use remoteImage function when getting digest for cache [#413](https://github.com/GoogleContainerTools/kaniko/pull/413)
* Add exit 1 when there are not enough command line vars passed to `… [#415](https://github.com/GoogleContainerTools/kaniko/pull/415)
* "Container Builder" -> "Cloud Build" [#414](https://github.com/GoogleContainerTools/kaniko/pull/414)
* Add the cache warmer to the release process [#412](https://github.com/GoogleContainerTools/kaniko/pull/412)
## Bug Fixes
* Fix bugs with .dockerignore and improve integration test [#424](https://github.com/GoogleContainerTools/kaniko/pull/424)
* Fix releasing the cache warmer [#418](https://github.com/GoogleContainerTools/kaniko/pull/418)
# v0.5.0 Release - 10/16/2018
## New Features

View File

@ -14,7 +14,7 @@
# Bump these on release
VERSION_MAJOR ?= 0
VERSION_MINOR ?= 3
VERSION_MINOR ?= 6
VERSION_BUILD ?= 0
VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)

View File

@ -33,6 +33,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
@ -114,9 +115,9 @@ func addKanikoOptionsFlags(cmd *cobra.Command) {
// addHiddenFlags marks certain flags as hidden from the executor help text
func addHiddenFlags(cmd *cobra.Command) {
// This flag is added in a vendored directory, hide so that it doesn't come up via --help
RootCmd.PersistentFlags().MarkHidden("azure-container-registry-config")
pflag.CommandLine.MarkHidden("azure-container-registry-config")
// Hide this flag as we want to encourage people to use the --context flag instead
RootCmd.PersistentFlags().MarkHidden("bucket")
cmd.PersistentFlags().MarkHidden("bucket")
}
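The two MarkHidden calls above land on different flag sets: --bucket is declared on the executor command itself, while azure-container-registry-config comes from a vendored package and, presumably, ends up on the global pflag.CommandLine rather than on the command. A minimal stand-in sketch of that distinction, not kaniko's actual flag wiring:
func hideFlagsSketch(cmd *cobra.Command) {
    // A flag the command owns is hidden on its own persistent flag set.
    cmd.PersistentFlags().String("bucket", "", "deprecated; use --context instead")
    cmd.PersistentFlags().MarkHidden("bucket")
    // Stand-in for a flag registered globally by a vendored package; it has to
    // be hidden on pflag.CommandLine because the command never declared it.
    pflag.CommandLine.String("azure-container-registry-config", "", "stand-in for the vendored flag")
    pflag.CommandLine.MarkHidden("azure-container-registry-config")
}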
func checkContained() bool {

View File

@ -17,4 +17,7 @@
# if the cache is implemented correctly
FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
WORKDIR /foo
RUN apt-get update && apt-get install -y make
COPY context/bar /context
RUN echo "hey" > foo

View File

@ -0,0 +1,17 @@
ARG REGISTRY=gcr.io
ARG REPO=google-appengine
ARG WORD=hello
ARG WORD2=hey
FROM ${REGISTRY}/${REPO}/debian9 as stage1
# Should evaluate WORD and create /tmp/hello
ARG WORD
RUN touch /${WORD}
FROM ${REGISTRY}/${REPO}/debian9
COPY --from=stage1 /hello /tmp
# /tmp/hey should not get created without the ARG statement
RUN touch /tmp/${WORD2}

View File

@ -131,3 +131,7 @@ func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfi
logrus.Infof("Using files from context: %v", files)
return files, nil
}
func (a *AddCommand) MetadataOnly() bool {
return false
}

View File

@ -42,7 +42,13 @@ func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
return err
}
resolvedValue = &value
} else {
meta := buildArgs.GetAllMeta()
if value, ok := meta[resolvedKey]; ok {
resolvedValue = &value
}
}
buildArgs.AddArg(resolvedKey, resolvedValue)
return nil
}

View File

@ -35,3 +35,15 @@ func (b *BaseCommand) FilesToSnapshot() []string {
func (b *BaseCommand) FilesUsedFromContext(_ *v1.Config, _ *dockerfile.BuildArgs) ([]string, error) {
return []string{}, nil
}
func (b *BaseCommand) MetadataOnly() bool {
return true
}
func (b *BaseCommand) RequiresUnpackedFS() bool {
return false
}
func (b *BaseCommand) ShouldCacheOutput() bool {
return false
}

View File

@ -36,11 +36,18 @@ type DockerCommand interface {
String() string
// A list of files to snapshot, empty for metadata commands or nil if we don't know
FilesToSnapshot() []string
// Return a cache-aware implementation of this command, if it exists.
CacheCommand(v1.Image) DockerCommand
// Return the files from the build context that this command uses, if any.
FilesUsedFromContext(*v1.Config, *dockerfile.BuildArgs) ([]string, error)
// Return true if this command only mutates image metadata and touches no files on disk.
MetadataOnly() bool
// Return true if this command needs the base image filesystem unpacked to root before it runs.
RequiresUnpackedFS() bool
// Return true if the layer produced by this command should be pushed to and retrieved from the layer cache.
ShouldCacheOutput() bool
}
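For orientation, the three new methods are what the rest of this change keys off: MetadataOnly gates config-only execution during cache optimization, RequiresUnpackedFS decides whether the root filesystem is unpacked at all, and ShouldCacheOutput decides whether a layer is pushed to the layer cache. A hypothetical command satisfying them by embedding BaseCommand (illustrative only, not part of this change):
// fakeLabelCommand is illustrative: embedding BaseCommand picks up the default
// MetadataOnly/RequiresUnpackedFS/ShouldCacheOutput answers, so a config-only
// command only has to implement what actually differs.
type fakeLabelCommand struct {
    BaseCommand
    key, value string
}

func (f *fakeLabelCommand) String() string {
    return "LABEL " + f.key + "=" + f.value
}

// ExecuteCommand mutates image metadata only; no files are touched, so the
// inherited MetadataOnly() answer of true is the right one.
func (f *fakeLabelCommand) ExecuteCommand(config *v1.Config, _ *dockerfile.BuildArgs) error {
    if config.Labels == nil {
        config.Labels = map[string]string{}
    }
    config.Labels[f.key] = f.value
    return nil
}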
func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, error) {

View File

@ -134,3 +134,7 @@ func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerf
logrus.Infof("Using files from context: %v", files)
return files, nil
}
func (c *CopyCommand) MetadataOnly() bool {
return false
}

View File

@ -161,6 +161,18 @@ func (r *RunCommand) CacheCommand(img v1.Image) DockerCommand {
}
}
func (r *RunCommand) MetadataOnly() bool {
return false
}
func (r *RunCommand) RequiresUnpackedFS() bool {
return true
}
func (r *RunCommand) ShouldCacheOutput() bool {
return true
}
type CachingRunCommand struct {
BaseCommand
img v1.Image

View File

@ -67,3 +67,7 @@ func (w *WorkdirCommand) FilesToSnapshot() []string {
func (w *WorkdirCommand) String() string {
return w.cmd.String()
}
func (w *WorkdirCommand) MetadataOnly() bool {
return false
}

View File

@ -25,4 +25,5 @@ type KanikoStage struct {
Final bool
BaseImageStoredLocally bool
SaveStage bool
MetaArgs []instructions.ArgCommand
}

View File

@ -20,6 +20,7 @@ import (
"strings"
d "github.com/docker/docker/builder/dockerfile"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
)
type BuildArgs struct {
@ -53,3 +54,11 @@ func (b *BuildArgs) ReplacementEnvs(envs []string) []string {
filtered := b.FilterAllowed(envs)
return append(envs, filtered...)
}
// AddMetaArgs adds the supplied meta ARG commands to b's allowedMetaArgs
func (b *BuildArgs) AddMetaArgs(metaArgs []instructions.ArgCommand) {
for _, arg := range metaArgs {
v := arg.Value
b.AddMetaArg(arg.Key, v)
}
}
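A hypothetical usage of the helper above, showing how ARGs parsed from the top of a Dockerfile would be folded into a stage's BuildArgs (names and values here are made up):
func addMetaArgsSketch() *BuildArgs {
    registry := "gcr.io"
    metaArgs := []instructions.ArgCommand{
        {Key: "REGISTRY", Value: &registry},
    }
    // No --build-arg overrides in this sketch; the meta args alone seed the set.
    args := NewBuildArgs(nil)
    args.AddMetaArgs(metaArgs)
    return args
}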

View File

@ -38,7 +38,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("reading dockerfile at path %s", opts.DockerfilePath))
}
stages, err := Parse(d)
stages, metaArgs, err := Parse(d)
if err != nil {
return nil, errors.Wrap(err, "parsing dockerfile")
}
@ -60,11 +60,13 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
BaseImageStoredLocally: (baseImageIndex(index, stages) != -1),
SaveStage: saveStage(index, stages),
Final: index == targetStage,
MetaArgs: metaArgs,
})
if index == targetStage {
break
}
}
return kanikoStages, nil
}
@ -83,16 +85,16 @@ func baseImageIndex(currentStage int, stages []instructions.Stage) int {
}
// Parse parses the contents of a Dockerfile and returns a list of stages and meta ARG commands
func Parse(b []byte) ([]instructions.Stage, error) {
func Parse(b []byte) ([]instructions.Stage, []instructions.ArgCommand, error) {
p, err := parser.Parse(bytes.NewReader(b))
if err != nil {
return nil, err
return nil, nil, err
}
stages, _, err := instructions.Parse(p.AST)
stages, metaArgs, err := instructions.Parse(p.AST)
if err != nil {
return nil, err
return nil, nil, err
}
return stages, err
return stages, metaArgs, err
}
// targetStage returns the index of the target stage kaniko is trying to build

View File

@ -35,7 +35,7 @@ func Test_resolveStages(t *testing.T) {
FROM scratch
COPY --from=second /hi2 /hi3
`
stages, err := Parse([]byte(dockerfile))
stages, _, err := Parse([]byte(dockerfile))
if err != nil {
t.Fatal(err)
}
@ -63,7 +63,7 @@ func Test_targetStage(t *testing.T) {
FROM scratch
COPY --from=second /hi2 /hi3
`
stages, err := Parse([]byte(dockerfile))
stages, _, err := Parse([]byte(dockerfile))
if err != nil {
t.Fatal(err)
}
@ -142,7 +142,7 @@ func Test_SaveStage(t *testing.T) {
expected: false,
},
}
stages, err := Parse([]byte(testutil.Dockerfile))
stages, _, err := Parse([]byte(testutil.Dockerfile))
if err != nil {
t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err)
}
@ -177,7 +177,7 @@ func Test_baseImageIndex(t *testing.T) {
},
}
stages, err := Parse([]byte(testutil.Dockerfile))
stages, _, err := Parse([]byte(testutil.Dockerfile))
if err != nil {
t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err)
}

View File

@ -17,15 +17,14 @@ limitations under the License.
package executor
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
"golang.org/x/sync/errgroup"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
@ -42,6 +41,9 @@ import (
"github.com/GoogleContainerTools/kaniko/pkg/util"
)
// This is the size of an empty tar in Go
const emptyTarSize = 1024
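The constant reflects how Go's archive/tar behaves: a writer closed without any entries emits only the two trailing 512-byte zero blocks, so a layer tarball at or below 1024 bytes carries no file changes. A quick standalone check, not kaniko code, assuming archive/tar and bytes are imported:
func emptyTarSizeCheck() int {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)
    // Closing without writing any headers produces only the end-of-archive
    // marker: two 512-byte blocks of zeros.
    tw.Close()
    return buf.Len() // 1024
}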
// stageBuilder contains all fields necessary to build one stage of a Dockerfile
type stageBuilder struct {
stage config.KanikoStage
@ -54,7 +56,7 @@ type stageBuilder struct {
// newStageBuilder returns a new stageBuilder which contains all the information required to build the stage
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs, opts)
sourceImage, err := util.RetrieveSourceImage(stage, opts)
if err != nil {
return nil, err
}
@ -86,42 +88,40 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
}, nil
}
func (s *stageBuilder) build() error {
// Unpack file system to root
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
}
// Take initial snapshot
if err := s.snapshotter.Init(); err != nil {
return err
}
// Set the initial cache key to be the base image digest, the build args and the SrcContext.
compositeKey := NewCompositeCache(s.baseImageDigest)
compositeKey.AddKey(s.opts.BuildArgs...)
cmds := []commands.DockerCommand{}
for _, cmd := range s.stage.Commands {
command, err := commands.GetCommand(cmd, s.opts.SrcContext)
if err != nil {
return err
}
cmds = append(cmds, command)
func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config, cmds []commands.DockerCommand, args *dockerfile.BuildArgs) error {
if !s.opts.Cache {
return nil
}
layerCache := &cache.RegistryCache{
Opts: s.opts,
}
if s.opts.Cache {
// Possibly replace commands with their cached implementations.
for i, command := range cmds {
if command == nil {
continue
}
ck, err := compositeKey.Hash()
if err != nil {
// Possibly replace commands with their cached implementations.
// We walk through all the commands, running any commands that only operate on metadata.
// We throw the metadata away after, but we need it to properly track command dependencies
// for things like COPY ${FOO} or RUN commands that use environment variables.
for i, command := range cmds {
if command == nil {
continue
}
compositeKey.AddKey(command.String())
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&cfg, args)
if err != nil {
return err
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return err
}
}
ck, err := compositeKey.Hash()
if err != nil {
return err
}
if command.ShouldCacheOutput() {
img, err := layerCache.RetrieveLayer(ck)
if err != nil {
logrus.Infof("No cached layer found for cmd %s", command.String())
@ -133,9 +133,65 @@ func (s *stageBuilder) build() error {
cmds[i] = cacheCmd
}
}
// Mutate the config for any commands that require it.
if command.MetadataOnly() {
if err := command.ExecuteCommand(&cfg, args); err != nil {
return err
}
}
}
return nil
}
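The walk above folds each command string, plus any build-context files the command reads, into a running composite key, so changing either invalidates every later cached layer. A rough sketch of that accumulation using the CompositeCache helpers referenced here (the digest and paths are illustrative):
func compositeKeySketch() (string, error) {
    key := NewCompositeCache("sha256:0123abcd") // base image digest seeds the key
    key.AddKey("--build-arg=WORD=hello")        // build args
    key.AddKey("RUN apt-get update")            // each command string
    if err := key.AddPath("context/bar"); err != nil { // files a COPY/ADD would use
        return "", err
    }
    return key.Hash()
}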
func (s *stageBuilder) build() error {
// Set the initial cache key to be the base image digest, the build args and the SrcContext.
compositeKey := NewCompositeCache(s.baseImageDigest)
compositeKey.AddKey(s.opts.BuildArgs...)
cmds := []commands.DockerCommand{}
for _, cmd := range s.stage.Commands {
command, err := commands.GetCommand(cmd, s.opts.SrcContext)
if err != nil {
return err
}
if command == nil {
continue
}
cmds = append(cmds, command)
}
args := dockerfile.NewBuildArgs(s.opts.BuildArgs)
args.AddMetaArgs(s.stage.MetaArgs)
// Apply optimizations to the instructions.
if err := s.optimize(*compositeKey, s.cf.Config, cmds, args); err != nil {
return err
}
// Unpack file system to root if we need to.
shouldUnpack := false
for _, cmd := range cmds {
if cmd.RequiresUnpackedFS() {
logrus.Infof("Unpacking rootfs as cmd %s requires it.", cmd.String())
shouldUnpack = true
break
}
}
if shouldUnpack {
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
}
}
if err := util.DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return err
}
// Take initial snapshot
if err := s.snapshotter.Init(); err != nil {
return err
}
cacheGroup := errgroup.Group{}
for index, command := range cmds {
if command == nil {
continue
@ -160,36 +216,48 @@ func (s *stageBuilder) build() error {
return err
}
files = command.FilesToSnapshot()
var contents []byte
if !s.shouldTakeSnapshot(index, files) {
continue
}
if files == nil || s.opts.SingleSnapshot {
contents, err = s.snapshotter.TakeSnapshotFS()
} else {
// Volumes are very weird. They get created in their command, but snapshotted in the next one.
// Add them to the list of files to snapshot.
for v := range s.cf.Config.Volumes {
files = append(files, v)
}
contents, err = s.snapshotter.TakeSnapshot(files)
}
tarPath, err := s.takeSnapshot(files)
if err != nil {
return err
}
ck, err := compositeKey.Hash()
if err != nil {
return err
}
if err := s.saveSnapshot(command.String(), ck, contents); err != nil {
// Push layer to cache (in parallel) now along with new config file
if s.opts.Cache && command.ShouldCacheOutput() {
cacheGroup.Go(func() error {
return pushLayerToCache(s.opts, ck, tarPath, command.String())
})
}
if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
return err
}
}
if err := cacheGroup.Wait(); err != nil {
logrus.Warnf("error uploading layer to cache: %s", err)
}
return nil
}
func (s *stageBuilder) takeSnapshot(files []string) (string, error) {
if files == nil || s.opts.SingleSnapshot {
return s.snapshotter.TakeSnapshotFS()
}
// Volumes are very weird. They get created in their command, but snapshotted in the next one.
// Add them to the list of files to snapshot.
for v := range s.cf.Config.Volumes {
files = append(files, v)
}
return s.snapshotter.TakeSnapshot(files)
}
func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
isLastCommand := index == len(s.stage.Commands)-1
@ -215,24 +283,22 @@ func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
return true
}
func (s *stageBuilder) saveSnapshot(createdBy string, ck string, contents []byte) error {
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error {
if tarPath == "" {
return nil
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
fi, err := os.Stat(tarPath)
if err != nil {
return err
}
// Push layer to cache now along with new config file
if s.opts.Cache {
if err := pushLayerToCache(s.opts, ck, layer, createdBy); err != nil {
return err
}
if fi.Size() <= emptyTarSize {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
return nil
}
layer, err := tarball.LayerFromFile(tarPath)
if err != nil {
return err
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
@ -306,7 +372,7 @@ func extractImageToDependecyDir(index int, image v1.Image) error {
if err := os.MkdirAll(dependencyDir, 0755); err != nil {
return err
}
logrus.Infof("trying to extract to %s", dependencyDir)
logrus.Debugf("trying to extract to %s", dependencyDir)
_, err := util.GetFSFromImage(dependencyDir, image)
return err
}

View File

@ -66,7 +66,7 @@ func Test_reviewConfig(t *testing.T) {
}
func stage(t *testing.T, d string) config.KanikoStage {
stages, err := dockerfile.Parse([]byte(d))
stages, _, err := dockerfile.Parse([]byte(d))
if err != nil {
t.Fatalf("error parsing dockerfile: %v", err)
}

View File

@ -107,7 +107,11 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
// pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
// if opts.Cache doesn't exist, infer the cache from the given destination
func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, layer v1.Layer, createdBy string) error {
func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, tarPath string, createdBy string) error {
layer, err := tarball.LayerFromFile(tarPath)
if err != nil {
return err
}
cache, err := cache.Destination(opts, cacheKey)
if err != nil {
return errors.Wrap(err, "getting cache destination")
@ -126,7 +130,7 @@ func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, layer v1.Laye
if err != nil {
return errors.Wrap(err, "appending layer onto empty image")
}
return DoPush(empty, &config.KanikoOptions{
Destinations: []string{cache},
})
cacheOpts := *opts
cacheOpts.Destinations = []string{cache}
return DoPush(empty, &cacheOpts)
}

View File

@ -92,13 +92,13 @@ func (l *LayeredMap) GetWhiteout(s string) (string, bool) {
return "", false
}
func (l *LayeredMap) MaybeAddWhiteout(s string) (bool, error) {
func (l *LayeredMap) MaybeAddWhiteout(s string) bool {
whiteout, ok := l.GetWhiteout(s)
if ok && whiteout == s {
return false, nil
return false
}
l.whiteouts[len(l.whiteouts)-1][s] = s
return true, nil
return true
}
// Add will add the specified file s to the layered map.

View File

@ -17,18 +17,21 @@ limitations under the License.
package snapshot
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/sirupsen/logrus"
)
// For testing
var snapshotPathPrefix = constants.KanikoDir
// Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken
type Snapshotter struct {
l *LayeredMap
@ -42,10 +45,8 @@ func NewSnapshotter(l *LayeredMap, d string) *Snapshotter {
// Init initializes a new snapshotter
func (s *Snapshotter) Init() error {
if _, err := s.snapShotFS(ioutil.Discard); err != nil {
return err
}
return nil
_, err := s.TakeSnapshotFS()
return err
}
// Key returns a string based on the current state of the file system
@ -55,46 +56,21 @@ func (s *Snapshotter) Key() (string, error) {
// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
// a tarball of the changed files. It returns the path to the tarball, or an empty string if nothing changed
func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) {
buf := bytes.NewBuffer([]byte{})
filesAdded, err := s.snapshotFiles(buf, files)
func (s *Snapshotter) TakeSnapshot(files []string) (string, error) {
f, err := ioutil.TempFile(snapshotPathPrefix, "")
if err != nil {
return nil, err
return "", err
}
contents := buf.Bytes()
if !filesAdded {
return nil, nil
}
return contents, err
}
defer f.Close()
// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
// a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
func (s *Snapshotter) TakeSnapshotFS() ([]byte, error) {
buf := bytes.NewBuffer([]byte{})
filesAdded, err := s.snapShotFS(buf)
if err != nil {
return nil, err
}
contents := buf.Bytes()
if !filesAdded {
return nil, nil
}
return contents, err
}
// snapshotFiles creates a snapshot (tar) and adds the specified files.
// It will not add files which are whitelisted.
func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
s.l.Snapshot()
if len(files) == 0 {
logrus.Info("No files changed in this command, skipping snapshotting.")
return false, nil
return "", nil
}
logrus.Info("Taking snapshot of files...")
logrus.Debugf("Taking snapshot of files %v", files)
snapshottedFiles := make(map[string]bool)
filesAdded := false
t := util.NewTar(f)
defer t.Close()
@ -114,15 +90,14 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
fileAdded, err := s.l.MaybeAdd(file)
if err != nil {
return false, fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err)
return "", fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err)
}
if fileAdded {
err = t.AddFileToTar(file)
if err != nil {
return false, fmt.Errorf("Error adding parent dir %s to tar: %s", file, err)
return "", fmt.Errorf("Error adding parent dir %s to tar: %s", file, err)
}
filesAdded = true
}
}
// Next add the files themselves to the tar
@ -134,21 +109,26 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
snapshottedFiles[file] = true
if err := s.l.Add(file); err != nil {
return false, fmt.Errorf("Unable to add file %s to layered map: %s", file, err)
return "", fmt.Errorf("Unable to add file %s to layered map: %s", file, err)
}
if err := t.AddFileToTar(file); err != nil {
return false, fmt.Errorf("Error adding file %s to tar: %s", file, err)
return "", fmt.Errorf("Error adding file %s to tar: %s", file, err)
}
filesAdded = true
}
return filesAdded, nil
return f.Name(), nil
}
// shapShotFS creates a snapshot (tar) of all files in the system which are not
// whitelisted and which have changed.
func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
// a tarball of the changed files.
func (s *Snapshotter) TakeSnapshotFS() (string, error) {
logrus.Info("Taking snapshot of full filesystem...")
f, err := ioutil.TempFile(snapshotPathPrefix, "")
if err != nil {
return "", err
}
defer f.Close()
// Some of the operations that follow (e.g. hashing) depend on the file system being synced,
// for example the hashing function that determines if files are equal uses the mtime of the files,
// which can lag if sync is not called. Unfortunately there can still be lag if too much data needs
@ -157,7 +137,6 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
s.l.Snapshot()
existingPaths := s.l.GetFlattenedPathsForWhiteOut()
filesAdded := false
t := util.NewTar(f)
defer t.Close()
@ -176,15 +155,10 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
// Only add the whiteout if the directory for the file still exists.
dir := filepath.Dir(path)
if _, ok := memFs[dir]; ok {
addWhiteout, err := s.l.MaybeAddWhiteout(path)
if err != nil {
return false, nil
}
if addWhiteout {
if s.l.MaybeAddWhiteout(path) {
logrus.Infof("Adding whiteout for %s", path)
filesAdded = true
if err := t.Whiteout(path); err != nil {
return false, err
return "", err
}
}
}
@ -194,7 +168,7 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
for path := range memFs {
whitelisted, err := util.CheckWhitelist(path)
if err != nil {
return false, err
return "", err
}
if whitelisted {
logrus.Debugf("Not adding %s to layer, as it's whitelisted", path)
@ -204,16 +178,15 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
// Only add to the tar if we add it to the layeredmap.
maybeAdd, err := s.l.MaybeAdd(path)
if err != nil {
return false, err
return "", err
}
if maybeAdd {
logrus.Debugf("Adding %s to layer, because it was changed.", path)
filesAdded = true
if err := t.AddFileToTar(path); err != nil {
return false, err
return "", err
}
}
}
return filesAdded, nil
return f.Name(), nil
}

View File

@ -17,7 +17,6 @@ package snapshot
import (
"archive/tar"
"bytes"
"io"
"io/ioutil"
"os"
@ -30,9 +29,8 @@ import (
)
func TestSnapshotFSFileChange(t *testing.T) {
testDir, snapshotter, err := setUpTestDir()
defer os.RemoveAll(testDir)
testDir, snapshotter, cleanup, err := setUpTestDir()
defer cleanup()
if err != nil {
t.Fatal(err)
}
@ -45,16 +43,17 @@ func TestSnapshotFSFileChange(t *testing.T) {
t.Fatalf("Error setting up fs: %s", err)
}
// Take another snapshot
contents, err := snapshotter.TakeSnapshotFS()
tarPath, err := snapshotter.TakeSnapshotFS()
if err != nil {
t.Fatalf("Error taking snapshot of fs: %s", err)
}
if contents == nil {
t.Fatal("No files added to snapshot.")
f, err := os.Open(tarPath)
if err != nil {
t.Fatal(err)
}
// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
reader := bytes.NewReader(contents)
tr := tar.NewReader(reader)
tr := tar.NewReader(f)
fooPath := filepath.Join(testDir, "foo")
batPath := filepath.Join(testDir, "bar/bat")
snapshotFiles := map[string]string{
@ -82,8 +81,8 @@ func TestSnapshotFSFileChange(t *testing.T) {
}
func TestSnapshotFSChangePermissions(t *testing.T) {
testDir, snapshotter, err := setUpTestDir()
defer os.RemoveAll(testDir)
testDir, snapshotter, cleanup, err := setUpTestDir()
defer cleanup()
if err != nil {
t.Fatal(err)
}
@ -93,16 +92,16 @@ func TestSnapshotFSChangePermissions(t *testing.T) {
t.Fatalf("Error changing permissions on %s: %v", batPath, err)
}
// Take another snapshot
contents, err := snapshotter.TakeSnapshotFS()
tarPath, err := snapshotter.TakeSnapshotFS()
if err != nil {
t.Fatalf("Error taking snapshot of fs: %s", err)
}
if contents == nil {
t.Fatal("No files added to snapshot.")
f, err := os.Open(tarPath)
if err != nil {
t.Fatal(err)
}
// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
reader := bytes.NewReader(contents)
tr := tar.NewReader(reader)
tr := tar.NewReader(f)
snapshotFiles := map[string]string{
batPath: "baz2",
}
@ -127,8 +126,8 @@ func TestSnapshotFSChangePermissions(t *testing.T) {
}
func TestSnapshotFiles(t *testing.T) {
testDir, snapshotter, err := setUpTestDir()
defer os.RemoveAll(testDir)
testDir, snapshotter, cleanup, err := setUpTestDir()
defer cleanup()
if err != nil {
t.Fatal(err)
}
@ -142,15 +141,20 @@ func TestSnapshotFiles(t *testing.T) {
filesToSnapshot := []string{
filepath.Join(testDir, "foo"),
}
contents, err := snapshotter.TakeSnapshot(filesToSnapshot)
tarPath, err := snapshotter.TakeSnapshot(filesToSnapshot)
if err != nil {
t.Fatal(err)
}
defer os.Remove(tarPath)
expectedFiles := []string{"/", "/tmp", filepath.Join(testDir, "foo")}
f, err := os.Open(tarPath)
if err != nil {
t.Fatal(err)
}
// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
reader := bytes.NewReader(contents)
tr := tar.NewReader(reader)
tr := tar.NewReader(f)
var actualFiles []string
for {
hdr, err := tr.Next()
@ -166,27 +170,42 @@ func TestSnapshotFiles(t *testing.T) {
}
func TestEmptySnapshotFS(t *testing.T) {
testDir, snapshotter, err := setUpTestDir()
defer os.RemoveAll(testDir)
_, snapshotter, cleanup, err := setUpTestDir()
if err != nil {
t.Fatal(err)
}
defer cleanup()
// Take snapshot with no changes
contents, err := snapshotter.TakeSnapshotFS()
tarPath, err := snapshotter.TakeSnapshotFS()
if err != nil {
t.Fatalf("Error taking snapshot of fs: %s", err)
}
// Since we took a snapshot with no changes, contents should be nil
if contents != nil {
t.Fatal("Files added even though no changes to file system were made.")
f, err := os.Open(tarPath)
if err != nil {
t.Fatal(err)
}
tr := tar.NewReader(f)
if _, err := tr.Next(); err != io.EOF {
t.Fatal("no files expected in tar, found files.")
}
}
func setUpTestDir() (string, *Snapshotter, error) {
func setUpTestDir() (string, *Snapshotter, func(), error) {
testDir, err := ioutil.TempDir("", "")
if err != nil {
return testDir, nil, errors.Wrap(err, "setting up temp dir")
return "", nil, nil, errors.Wrap(err, "setting up temp dir")
}
snapshotPath, err := ioutil.TempDir("", "")
if err != nil {
return "", nil, nil, errors.Wrap(err, "setting up temp dir")
}
snapshotPathPrefix = snapshotPath
files := map[string]string{
"foo": "baz1",
"bar/bat": "baz2",
@ -194,14 +213,20 @@ func setUpTestDir() (string, *Snapshotter, error) {
}
// Set up initial files
if err := testutil.SetupFiles(testDir, files); err != nil {
return testDir, nil, errors.Wrap(err, "setting up file system")
return "", nil, nil, errors.Wrap(err, "setting up file system")
}
// Take the initial snapshot
l := NewLayeredMap(util.Hasher(), util.CacheHasher())
snapshotter := NewSnapshotter(l, testDir)
if err := snapshotter.Init(); err != nil {
return testDir, nil, errors.Wrap(err, "initializing snapshotter")
return "", nil, nil, errors.Wrap(err, "initializing snapshotter")
}
return testDir, snapshotter, nil
cleanup := func() {
os.RemoveAll(snapshotPath)
os.RemoveAll(testDir)
}
return testDir, snapshotter, cleanup, nil
}

View File

@ -62,8 +62,7 @@ var whitelist = []WhitelistEntry{
// GetFSFromImage extracts the layers of img to root
// It returns a list of all files extracted
func GetFSFromImage(root string, img v1.Image) ([]string, error) {
whitelist, err := fileSystemWhitelist(constants.WhitelistPath)
if err != nil {
if err := DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return nil, err
}
logrus.Debugf("Mounted directories: %v", whitelist)
@ -74,7 +73,7 @@ func GetFSFromImage(root string, img v1.Image) ([]string, error) {
extractedFiles := []string{}
for i, l := range layers {
logrus.Infof("Extracting layer %d", i)
logrus.Debugf("Extracting layer %d", i)
r, err := l.Uncompressed()
if err != nil {
return nil, err
@ -303,10 +302,10 @@ func checkWhitelistRoot(root string) bool {
// (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
// Where (5) is the mount point relative to the process's root
// From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func fileSystemWhitelist(path string) ([]WhitelistEntry, error) {
func DetectFilesystemWhitelist(path string) error {
f, err := os.Open(path)
if err != nil {
return nil, err
return err
}
defer f.Close()
reader := bufio.NewReader(f)
@ -314,7 +313,7 @@ func fileSystemWhitelist(path string) ([]WhitelistEntry, error) {
line, err := reader.ReadString('\n')
logrus.Debugf("Read the following line from %s: %s", path, line)
if err != nil && err != io.EOF {
return nil, err
return err
}
lineArr := strings.Split(line, " ")
if len(lineArr) < 5 {
@ -336,7 +335,7 @@ func fileSystemWhitelist(path string) ([]WhitelistEntry, error) {
break
}
}
return whitelist, nil
return nil
}
// RelativeFiles returns a list of all files at the filepath relative to root
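For reference, the parsing above checks for at least five fields because field (5) is the mount point it cares about, per the format quoted in the comment. A standalone sketch of that extraction (the sample line comes from the kernel documentation):
func mountPointSketch() string {
    line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
    fields := strings.Split(line, " ")
    if len(fields) < 5 {
        return ""
    }
    return fields[4] // "/mnt2", the mount point relative to the process's root
}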

View File

@ -29,7 +29,7 @@ import (
"github.com/GoogleContainerTools/kaniko/testutil"
)
func Test_fileSystemWhitelist(t *testing.T) {
func Test_DetectFilesystemWhitelist(t *testing.T) {
testDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Error creating tempdir: %s", err)
@ -49,7 +49,7 @@ func Test_fileSystemWhitelist(t *testing.T) {
t.Fatalf("Error writing file contents to %s: %s", path, err)
}
actualWhitelist, err := fileSystemWhitelist(path)
err = DetectFilesystemWhitelist(path)
expectedWhitelist := []WhitelistEntry{
{"/kaniko", false},
{"/proc", false},
@ -59,6 +59,7 @@ func Test_fileSystemWhitelist(t *testing.T) {
{"/var/run", false},
{"/etc/mtab", false},
}
actualWhitelist := whitelist
sort.Slice(actualWhitelist, func(i, j int) bool {
return actualWhitelist[i].Path < actualWhitelist[j].Path
})

View File

@ -18,6 +18,7 @@ package util
import (
"crypto/tls"
"fmt"
"net/http"
"path/filepath"
"strconv"
@ -44,7 +45,13 @@ var (
)
// RetrieveSourceImage returns the base image of the stage at index
func RetrieveSourceImage(stage config.KanikoStage, buildArgs []string, opts *config.KanikoOptions) (v1.Image, error) {
func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (v1.Image, error) {
buildArgs := opts.BuildArgs
var metaArgsString []string
for _, arg := range stage.MetaArgs {
metaArgsString = append(metaArgsString, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
}
buildArgs = append(buildArgs, metaArgsString...)
currentBaseName, err := ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false)
if err != nil {
return nil, err

View File

@ -57,7 +57,7 @@ func Test_StandardImage(t *testing.T) {
retrieveRemoteImage = mock
actual, err := RetrieveSourceImage(config.KanikoStage{
Stage: stages[0],
}, nil, &config.KanikoOptions{})
}, &config.KanikoOptions{})
testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual)
}
func Test_ScratchImage(t *testing.T) {
@ -67,7 +67,7 @@ func Test_ScratchImage(t *testing.T) {
}
actual, err := RetrieveSourceImage(config.KanikoStage{
Stage: stages[1],
}, nil, &config.KanikoOptions{})
}, &config.KanikoOptions{})
expected := empty.Image
testutil.CheckErrorAndDeepEqual(t, false, err, expected, actual)
}
@ -89,7 +89,7 @@ func Test_TarImage(t *testing.T) {
BaseImageStoredLocally: true,
BaseImageIndex: 0,
Stage: stages[2],
}, nil, &config.KanikoOptions{})
}, &config.KanikoOptions{})
testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual)
}