Rebasing
commit b4be847fd2

CHANGELOG.md (25 lines changed)
							|  | @ -1,3 +1,28 @@ | ||||||
|  | # v0.6.0 Release - 11/06/2018 | ||||||
|  | 
 | ||||||
|  | ## New Features | ||||||
|  | * parse arg commands at the top of dockerfiles [#404](https://github.com/GoogleContainerTools/kaniko/pull/404) | ||||||
|  | * Add buffering for large layers. [#428](https://github.com/GoogleContainerTools/kaniko/pull/428) | ||||||
|  | * Separate Insecure Pull Options [#409](https://github.com/GoogleContainerTools/kaniko/pull/409) | ||||||
|  | * Add support for .dockerignore file [#394](https://github.com/GoogleContainerTools/kaniko/pull/394) | ||||||
|  | * Support insecure pull [#401](https://github.com/GoogleContainerTools/kaniko/pull/401) | ||||||
|  | 
 | ||||||
|  | ## Updates | ||||||
|  | * Preserve options when doing a cache push [#423](https://github.com/GoogleContainerTools/kaniko/pull/423) | ||||||
|  | * More cache cleanups: [#397](https://github.com/GoogleContainerTools/kaniko/pull/397) | ||||||
|  | *  adding documentation for base image caching [#421](https://github.com/GoogleContainerTools/kaniko/pull/421) | ||||||
|  | * Update go-containerregistry [#420](https://github.com/GoogleContainerTools/kaniko/pull/420) | ||||||
|  | * Update README [#419](https://github.com/GoogleContainerTools/kaniko/pull/419) | ||||||
|  | * Use remoteImage function when getting digest for cache [#413](https://github.com/GoogleContainerTools/kaniko/pull/413) | ||||||
|  | * adding exit 1 when there are not enough command line vars passed to `… [#415](https://github.com/GoogleContainerTools/kaniko/pull/415) | ||||||
|  | * "Container Builder" - > "Cloud Build" [#414](https://github.com/GoogleContainerTools/kaniko/pull/414) | ||||||
|  | * adding the cache warmer to the release process [#412](https://github.com/GoogleContainerTools/kaniko/pull/412) | ||||||
|  | 
 | ||||||
|  | ## Bug Fixes | ||||||
|  | * Fix bugs with .dockerignore and improve integration test [#424](https://github.com/GoogleContainerTools/kaniko/pull/424) | ||||||
|  | * fix releasing the cache warmer [#418](https://github.com/GoogleContainerTools/kaniko/pull/418) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| # v0.5.0 Release - 10/16/2018 | # v0.5.0 Release - 10/16/2018 | ||||||
| 
 | 
 | ||||||
| ## New Features | ## New Features | ||||||
|  |  | ||||||
							
								
								
									
Makefile (2 lines changed)
							|  | @ -14,7 +14,7 @@ | ||||||
| 
 | 
 | ||||||
| # Bump these on release
 | # Bump these on release
 | ||||||
| VERSION_MAJOR ?= 0 | VERSION_MAJOR ?= 0 | ||||||
| VERSION_MINOR ?= 3 | VERSION_MINOR ?= 6 | ||||||
| VERSION_BUILD ?= 0 | VERSION_BUILD ?= 0 | ||||||
| 
 | 
 | ||||||
| VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) | VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) | ||||||
|  |  | ||||||
|  | @ -33,6 +33,7 @@ import ( | ||||||
| 	"github.com/pkg/errors" | 	"github.com/pkg/errors" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| 	"github.com/spf13/cobra" | 	"github.com/spf13/cobra" | ||||||
|  | 	"github.com/spf13/pflag" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| var ( | var ( | ||||||
|  | @ -114,9 +115,9 @@ func addKanikoOptionsFlags(cmd *cobra.Command) { | ||||||
| // addHiddenFlags marks certain flags as hidden from the executor help text
 | // addHiddenFlags marks certain flags as hidden from the executor help text
 | ||||||
| func addHiddenFlags(cmd *cobra.Command) { | func addHiddenFlags(cmd *cobra.Command) { | ||||||
| 	// This flag is added in a vendored directory, hide so that it doesn't come up via --help
 | 	// This flag is added in a vendored directory, hide so that it doesn't come up via --help
 | ||||||
| 	RootCmd.PersistentFlags().MarkHidden("azure-container-registry-config") | 	pflag.CommandLine.MarkHidden("azure-container-registry-config") | ||||||
| 	// Hide this flag as we want to encourage people to use the --context flag instead
 | 	// Hide this flag as we want to encourage people to use the --context flag instead
 | ||||||
| 	RootCmd.PersistentFlags().MarkHidden("bucket") | 	cmd.PersistentFlags().MarkHidden("bucket") | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func checkContained() bool { | func checkContained() bool { | ||||||
|  |  | ||||||
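
Note on the flag changes above: a minimal sketch of the distinction, under the assumption (suggested by this diff) that the vendored azure-container-registry-config flag is registered on the process-global pflag flag set, while --bucket belongs to the executor command itself, so each must be hidden on the flag set that actually owns it.

package main

import (
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

var rootCmd = &cobra.Command{Use: "executor"}

func init() {
	// A flag the command owns: hide it on the command's own persistent flag set.
	rootCmd.PersistentFlags().String("bucket", "", "deprecated; use --context instead")
	rootCmd.PersistentFlags().MarkHidden("bucket")

	// A flag registered globally by an imported package (simulated here):
	// it lives on pflag.CommandLine, so it has to be hidden there.
	pflag.String("azure-container-registry-config", "", "registered by another package")
	pflag.CommandLine.MarkHidden("azure-container-registry-config")
}

func main() {
	rootCmd.Execute()
}
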
|  | @ -17,4 +17,7 @@ | ||||||
| # if the cache is implemented correctly | # if the cache is implemented correctly | ||||||
| 
 | 
 | ||||||
| FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0 | FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0 | ||||||
|  | WORKDIR /foo | ||||||
| RUN apt-get update && apt-get install -y make | RUN apt-get update && apt-get install -y make | ||||||
|  | COPY context/bar /context | ||||||
|  | RUN echo "hey" > foo | ||||||
|  |  | ||||||
|  | @ -0,0 +1,17 @@ | ||||||
|  | ARG REGISTRY=gcr.io | ||||||
|  | ARG REPO=google-appengine | ||||||
|  | ARG WORD=hello | ||||||
|  | ARG W0RD2=hey | ||||||
|  | 
 | ||||||
|  | FROM ${REGISTRY}/${REPO}/debian9 as stage1 | ||||||
|  | 
 | ||||||
|  | # Should evaluate WORD and create /tmp/hello | ||||||
|  | ARG WORD | ||||||
|  | RUN touch /${WORD} | ||||||
|  | 
 | ||||||
|  | FROM ${REGISTRY}/${REPO}/debian9 | ||||||
|  | 
 | ||||||
|  | COPY --from=stage1 /hello /tmp | ||||||
|  | 
 | ||||||
|  | # /tmp/hey should not get created without the ARG statement | ||||||
|  | RUN touch /tmp/${WORD2} | ||||||
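
The new test Dockerfile above exercises ARG instructions that appear before the first FROM ("meta args"). With the Parse signature introduced later in this commit, those come back as their own slice instead of being discarded. A minimal sketch of calling it; the import path is assumed from the repo layout:

package main

import (
	"fmt"

	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
)

func main() {
	df := []byte(`ARG REGISTRY=gcr.io
ARG REPO=google-appengine

FROM ${REGISTRY}/${REPO}/debian9
RUN echo hello
`)
	// Parse now returns the stages and the meta ARGs declared before the first FROM.
	stages, metaArgs, err := dockerfile.Parse(df)
	if err != nil {
		panic(err)
	}
	fmt.Println("stages:", len(stages))
	for _, arg := range metaArgs {
		fmt.Printf("meta arg %s=%s\n", arg.Key, arg.ValueString())
	}
}
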
|  | @ -131,3 +131,7 @@ func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfi | ||||||
| 	logrus.Infof("Using files from context: %v", files) | 	logrus.Infof("Using files from context: %v", files) | ||||||
| 	return files, nil | 	return files, nil | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func (a *AddCommand) MetadataOnly() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -42,7 +42,13 @@ func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
| 		resolvedValue = &value | 		resolvedValue = &value | ||||||
|  | 	} else { | ||||||
|  | 		meta := buildArgs.GetAllMeta() | ||||||
|  | 		if value, ok := meta[resolvedKey]; ok { | ||||||
|  | 			resolvedValue = &value | ||||||
|  | 		} | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
| 	buildArgs.AddArg(resolvedKey, resolvedValue) | 	buildArgs.AddArg(resolvedKey, resolvedValue) | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -35,3 +35,15 @@ func (b *BaseCommand) FilesToSnapshot() []string { | ||||||
| func (b *BaseCommand) FilesUsedFromContext(_ *v1.Config, _ *dockerfile.BuildArgs) ([]string, error) { | func (b *BaseCommand) FilesUsedFromContext(_ *v1.Config, _ *dockerfile.BuildArgs) ([]string, error) { | ||||||
| 	return []string{}, nil | 	return []string{}, nil | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func (b *BaseCommand) MetadataOnly() bool { | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *BaseCommand) RequiresUnpackedFS() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (b *BaseCommand) ShouldCacheOutput() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  |  | ||||||
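
BaseCommand now carries the conservative defaults for the three new interface methods (metadata-only, no unpacked filesystem needed, output not cached), and concrete commands override only what differs via struct embedding. An illustrative, self-contained sketch of that pattern; the type names here are local to the example, not kaniko's:

package main

import "fmt"

// command stands in for the methods added to DockerCommand in this commit.
type command interface {
	MetadataOnly() bool
	RequiresUnpackedFS() bool
	ShouldCacheOutput() bool
}

// baseCommand plays the role of BaseCommand: safe defaults for metadata-only commands.
type baseCommand struct{}

func (baseCommand) MetadataOnly() bool       { return true }
func (baseCommand) RequiresUnpackedFS() bool { return false }
func (baseCommand) ShouldCacheOutput() bool  { return false }

// runCommand overrides only what differs, the way RunCommand does in the diff.
type runCommand struct{ baseCommand }

func (runCommand) MetadataOnly() bool       { return false }
func (runCommand) RequiresUnpackedFS() bool { return true }
func (runCommand) ShouldCacheOutput() bool  { return true }

func main() {
	for _, c := range []command{baseCommand{}, runCommand{}} {
		fmt.Println(c.MetadataOnly(), c.RequiresUnpackedFS(), c.ShouldCacheOutput())
	}
}
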
|  | @ -36,11 +36,18 @@ type DockerCommand interface { | ||||||
| 	String() string | 	String() string | ||||||
| 	// A list of files to snapshot, empty for metadata commands or nil if we don't know
 | 	// A list of files to snapshot, empty for metadata commands or nil if we don't know
 | ||||||
| 	FilesToSnapshot() []string | 	FilesToSnapshot() []string | ||||||
|  | 
 | ||||||
| 	// Return a cache-aware implementation of this command, if it exists.
 | 	// Return a cache-aware implementation of this command, if it exists.
 | ||||||
| 	CacheCommand(v1.Image) DockerCommand | 	CacheCommand(v1.Image) DockerCommand | ||||||
| 
 | 
 | ||||||
| 	// Return true if this command depends on the build context.
 | 	// Return true if this command depends on the build context.
 | ||||||
| 	FilesUsedFromContext(*v1.Config, *dockerfile.BuildArgs) ([]string, error) | 	FilesUsedFromContext(*v1.Config, *dockerfile.BuildArgs) ([]string, error) | ||||||
|  | 
 | ||||||
|  | 	MetadataOnly() bool | ||||||
|  | 
 | ||||||
|  | 	RequiresUnpackedFS() bool | ||||||
|  | 
 | ||||||
|  | 	ShouldCacheOutput() bool | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, error) { | func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, error) { | ||||||
|  |  | ||||||
|  | @ -134,3 +134,7 @@ func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerf | ||||||
| 	logrus.Infof("Using files from context: %v", files) | 	logrus.Infof("Using files from context: %v", files) | ||||||
| 	return files, nil | 	return files, nil | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func (c *CopyCommand) MetadataOnly() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -161,6 +161,18 @@ func (r *RunCommand) CacheCommand(img v1.Image) DockerCommand { | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func (r *RunCommand) MetadataOnly() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *RunCommand) RequiresUnpackedFS() bool { | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (r *RunCommand) ShouldCacheOutput() bool { | ||||||
|  | 	return true | ||||||
|  | } | ||||||
|  | 
 | ||||||
| type CachingRunCommand struct { | type CachingRunCommand struct { | ||||||
| 	BaseCommand | 	BaseCommand | ||||||
| 	img            v1.Image | 	img            v1.Image | ||||||
|  |  | ||||||
|  | @ -67,3 +67,7 @@ func (w *WorkdirCommand) FilesToSnapshot() []string { | ||||||
| func (w *WorkdirCommand) String() string { | func (w *WorkdirCommand) String() string { | ||||||
| 	return w.cmd.String() | 	return w.cmd.String() | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | func (w *WorkdirCommand) MetadataOnly() bool { | ||||||
|  | 	return false | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -25,4 +25,5 @@ type KanikoStage struct { | ||||||
| 	Final                  bool | 	Final                  bool | ||||||
| 	BaseImageStoredLocally bool | 	BaseImageStoredLocally bool | ||||||
| 	SaveStage              bool | 	SaveStage              bool | ||||||
|  | 	MetaArgs               []instructions.ArgCommand | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -20,6 +20,7 @@ import ( | ||||||
| 	"strings" | 	"strings" | ||||||
| 
 | 
 | ||||||
| 	d "github.com/docker/docker/builder/dockerfile" | 	d "github.com/docker/docker/builder/dockerfile" | ||||||
|  | 	"github.com/moby/buildkit/frontend/dockerfile/instructions" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type BuildArgs struct { | type BuildArgs struct { | ||||||
|  | @ -53,3 +54,11 @@ func (b *BuildArgs) ReplacementEnvs(envs []string) []string { | ||||||
| 	filtered := b.FilterAllowed(envs) | 	filtered := b.FilterAllowed(envs) | ||||||
| 	return append(envs, filtered...) | 	return append(envs, filtered...) | ||||||
| } | } | ||||||
|  | 
 | ||||||
|  | // AddMetaArgs adds the supplied args map to b's allowedMetaArgs
 | ||||||
|  | func (b *BuildArgs) AddMetaArgs(metaArgs []instructions.ArgCommand) { | ||||||
|  | 	for _, arg := range metaArgs { | ||||||
|  | 		v := arg.Value | ||||||
|  | 		b.AddMetaArg(arg.Key, v) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  |  | ||||||
|  | @ -38,7 +38,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, errors.Wrap(err, fmt.Sprintf("reading dockerfile at path %s", opts.DockerfilePath)) | 		return nil, errors.Wrap(err, fmt.Sprintf("reading dockerfile at path %s", opts.DockerfilePath)) | ||||||
| 	} | 	} | ||||||
| 	stages, err := Parse(d) | 	stages, metaArgs, err := Parse(d) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, errors.Wrap(err, "parsing dockerfile") | 		return nil, errors.Wrap(err, "parsing dockerfile") | ||||||
| 	} | 	} | ||||||
|  | @ -60,11 +60,13 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) { | ||||||
| 			BaseImageStoredLocally: (baseImageIndex(index, stages) != -1), | 			BaseImageStoredLocally: (baseImageIndex(index, stages) != -1), | ||||||
| 			SaveStage:              saveStage(index, stages), | 			SaveStage:              saveStage(index, stages), | ||||||
| 			Final:                  index == targetStage, | 			Final:                  index == targetStage, | ||||||
|  | 			MetaArgs:               metaArgs, | ||||||
| 		}) | 		}) | ||||||
| 		if index == targetStage { | 		if index == targetStage { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
| 	return kanikoStages, nil | 	return kanikoStages, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | @ -83,16 +85,16 @@ func baseImageIndex(currentStage int, stages []instructions.Stage) int { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Parse parses the contents of a Dockerfile and returns a list of commands
 | // Parse parses the contents of a Dockerfile and returns a list of commands
 | ||||||
| func Parse(b []byte) ([]instructions.Stage, error) { | func Parse(b []byte) ([]instructions.Stage, []instructions.ArgCommand, error) { | ||||||
| 	p, err := parser.Parse(bytes.NewReader(b)) | 	p, err := parser.Parse(bytes.NewReader(b)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, nil, err | ||||||
| 	} | 	} | ||||||
| 	stages, _, err := instructions.Parse(p.AST) | 	stages, metaArgs, err := instructions.Parse(p.AST) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, nil, err | ||||||
| 	} | 	} | ||||||
| 	return stages, err | 	return stages, metaArgs, err | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // targetStage returns the index of the target stage kaniko is trying to build
 | // targetStage returns the index of the target stage kaniko is trying to build
 | ||||||
|  |  | ||||||
|  | @ -35,7 +35,7 @@ func Test_resolveStages(t *testing.T) { | ||||||
| 	FROM scratch | 	FROM scratch | ||||||
| 	COPY --from=second /hi2 /hi3 | 	COPY --from=second /hi2 /hi3 | ||||||
| 	` | 	` | ||||||
| 	stages, err := Parse([]byte(dockerfile)) | 	stages, _, err := Parse([]byte(dockerfile)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | @ -63,7 +63,7 @@ func Test_targetStage(t *testing.T) { | ||||||
| 	FROM scratch | 	FROM scratch | ||||||
| 	COPY --from=second /hi2 /hi3 | 	COPY --from=second /hi2 /hi3 | ||||||
| 	` | 	` | ||||||
| 	stages, err := Parse([]byte(dockerfile)) | 	stages, _, err := Parse([]byte(dockerfile)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | @ -142,7 +142,7 @@ func Test_SaveStage(t *testing.T) { | ||||||
| 			expected: false, | 			expected: false, | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 	stages, err := Parse([]byte(testutil.Dockerfile)) | 	stages, _, err := Parse([]byte(testutil.Dockerfile)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err) | 		t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err) | ||||||
| 	} | 	} | ||||||
|  | @ -177,7 +177,7 @@ func Test_baseImageIndex(t *testing.T) { | ||||||
| 		}, | 		}, | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	stages, err := Parse([]byte(testutil.Dockerfile)) | 	stages, _, err := Parse([]byte(testutil.Dockerfile)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err) | 		t.Fatalf("couldn't retrieve stages from Dockerfile: %v", err) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -17,15 +17,14 @@ limitations under the License. | ||||||
| package executor | package executor | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"bytes" |  | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"io" |  | ||||||
| 	"io/ioutil" |  | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
| 	"strconv" | 	"strconv" | ||||||
| 	"time" | 	"time" | ||||||
| 
 | 
 | ||||||
|  | 	"golang.org/x/sync/errgroup" | ||||||
|  | 
 | ||||||
| 	"github.com/google/go-containerregistry/pkg/name" | 	"github.com/google/go-containerregistry/pkg/name" | ||||||
| 	"github.com/google/go-containerregistry/pkg/v1" | 	"github.com/google/go-containerregistry/pkg/v1" | ||||||
| 	"github.com/google/go-containerregistry/pkg/v1/mutate" | 	"github.com/google/go-containerregistry/pkg/v1/mutate" | ||||||
|  | @ -42,6 +41,9 @@ import ( | ||||||
| 	"github.com/GoogleContainerTools/kaniko/pkg/util" | 	"github.com/GoogleContainerTools/kaniko/pkg/util" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | // This is the size of an empty tar in Go
 | ||||||
|  | const emptyTarSize = 1024 | ||||||
|  | 
 | ||||||
| // stageBuilder contains all fields necessary to build one stage of a Dockerfile
 | // stageBuilder contains all fields necessary to build one stage of a Dockerfile
 | ||||||
| type stageBuilder struct { | type stageBuilder struct { | ||||||
| 	stage           config.KanikoStage | 	stage           config.KanikoStage | ||||||
|  | @ -54,7 +56,7 @@ type stageBuilder struct { | ||||||
| 
 | 
 | ||||||
| // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
 | // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
 | ||||||
| func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) { | func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) { | ||||||
| 	sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs, opts) | 	sourceImage, err := util.RetrieveSourceImage(stage, opts) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
|  | @ -86,42 +88,40 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta | ||||||
| 	}, nil | 	}, nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (s *stageBuilder) build() error { | func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config, cmds []commands.DockerCommand, args *dockerfile.BuildArgs) error { | ||||||
| 	// Unpack file system to root
 | 	if !s.opts.Cache { | ||||||
| 	if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil { | 		return nil | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 	// Take initial snapshot
 |  | ||||||
| 	if err := s.snapshotter.Init(); err != nil { |  | ||||||
| 		return err |  | ||||||
| 	} |  | ||||||
| 
 |  | ||||||
| 	// Set the initial cache key to be the base image digest, the build args and the SrcContext.
 |  | ||||||
| 	compositeKey := NewCompositeCache(s.baseImageDigest) |  | ||||||
| 	compositeKey.AddKey(s.opts.BuildArgs...) |  | ||||||
| 
 |  | ||||||
| 	cmds := []commands.DockerCommand{} |  | ||||||
| 	for _, cmd := range s.stage.Commands { |  | ||||||
| 		command, err := commands.GetCommand(cmd, s.opts.SrcContext) |  | ||||||
| 		if err != nil { |  | ||||||
| 			return err |  | ||||||
| 		} |  | ||||||
| 		cmds = append(cmds, command) |  | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	layerCache := &cache.RegistryCache{ | 	layerCache := &cache.RegistryCache{ | ||||||
| 		Opts: s.opts, | 		Opts: s.opts, | ||||||
| 	} | 	} | ||||||
| 	if s.opts.Cache { | 
 | ||||||
| 		// Possibly replace commands with their cached implementations.
 | 	// Possibly replace commands with their cached implementations.
 | ||||||
| 		for i, command := range cmds { | 	// We walk through all the commands, running any commands that only operate on metadata.
 | ||||||
| 			if command == nil { | 	// We throw the metadata away after, but we need it to properly track command dependencies
 | ||||||
| 				continue | 	// for things like COPY ${FOO} or RUN commands that use environment variables.
 | ||||||
| 			} | 	for i, command := range cmds { | ||||||
| 			ck, err := compositeKey.Hash() | 		if command == nil { | ||||||
| 			if err != nil { | 			continue | ||||||
|  | 		} | ||||||
|  | 		compositeKey.AddKey(command.String()) | ||||||
|  | 		// If the command uses files from the context, add them.
 | ||||||
|  | 		files, err := command.FilesUsedFromContext(&cfg, args) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		for _, f := range files { | ||||||
|  | 			if err := compositeKey.AddPath(f); err != nil { | ||||||
| 				return err | 				return err | ||||||
| 			} | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		ck, err := compositeKey.Hash() | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		if command.ShouldCacheOutput() { | ||||||
| 			img, err := layerCache.RetrieveLayer(ck) | 			img, err := layerCache.RetrieveLayer(ck) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				logrus.Infof("No cached layer found for cmd %s", command.String()) | 				logrus.Infof("No cached layer found for cmd %s", command.String()) | ||||||
|  | @ -133,9 +133,65 @@ func (s *stageBuilder) build() error { | ||||||
| 				cmds[i] = cacheCmd | 				cmds[i] = cacheCmd | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | 
 | ||||||
|  | 		// Mutate the config for any commands that require it.
 | ||||||
|  | 		if command.MetadataOnly() { | ||||||
|  | 			if err := command.ExecuteCommand(&cfg, args); err != nil { | ||||||
|  | 				return err | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	return nil | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (s *stageBuilder) build() error { | ||||||
|  | 	// Set the initial cache key to be the base image digest, the build args and the SrcContext.
 | ||||||
|  | 	compositeKey := NewCompositeCache(s.baseImageDigest) | ||||||
|  | 	compositeKey.AddKey(s.opts.BuildArgs...) | ||||||
|  | 
 | ||||||
|  | 	cmds := []commands.DockerCommand{} | ||||||
|  | 	for _, cmd := range s.stage.Commands { | ||||||
|  | 		command, err := commands.GetCommand(cmd, s.opts.SrcContext) | ||||||
|  | 		if err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 		if command == nil { | ||||||
|  | 			continue | ||||||
|  | 		} | ||||||
|  | 		cmds = append(cmds, command) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	args := dockerfile.NewBuildArgs(s.opts.BuildArgs) | 	args := dockerfile.NewBuildArgs(s.opts.BuildArgs) | ||||||
|  | 	args.AddMetaArgs(s.stage.MetaArgs) | ||||||
|  | 
 | ||||||
|  | 	// Apply optimizations to the instructions.
 | ||||||
|  | 	if err := s.optimize(*compositeKey, s.cf.Config, cmds, args); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	// Unpack file system to root if we need to.
 | ||||||
|  | 	shouldUnpack := false | ||||||
|  | 	for _, cmd := range cmds { | ||||||
|  | 		if cmd.RequiresUnpackedFS() { | ||||||
|  | 			logrus.Infof("Unpacking rootfs as cmd %s requires it.", cmd.String()) | ||||||
|  | 			shouldUnpack = true | ||||||
|  | 			break | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if shouldUnpack { | ||||||
|  | 		if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil { | ||||||
|  | 			return err | ||||||
|  | 		} | ||||||
|  | 	} | ||||||
|  | 	if err := util.DetectFilesystemWhitelist(constants.WhitelistPath); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 	// Take initial snapshot
 | ||||||
|  | 	if err := s.snapshotter.Init(); err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	cacheGroup := errgroup.Group{} | ||||||
| 	for index, command := range cmds { | 	for index, command := range cmds { | ||||||
| 		if command == nil { | 		if command == nil { | ||||||
| 			continue | 			continue | ||||||
|  | @ -160,36 +216,48 @@ func (s *stageBuilder) build() error { | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
| 		files = command.FilesToSnapshot() | 		files = command.FilesToSnapshot() | ||||||
| 		var contents []byte |  | ||||||
| 
 | 
 | ||||||
| 		if !s.shouldTakeSnapshot(index, files) { | 		if !s.shouldTakeSnapshot(index, files) { | ||||||
| 			continue | 			continue | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if files == nil || s.opts.SingleSnapshot { | 		tarPath, err := s.takeSnapshot(files) | ||||||
| 			contents, err = s.snapshotter.TakeSnapshotFS() |  | ||||||
| 		} else { |  | ||||||
| 			// Volumes are very weird. They get created in their command, but snapshotted in the next one.
 |  | ||||||
| 			// Add them to the list of files to snapshot.
 |  | ||||||
| 			for v := range s.cf.Config.Volumes { |  | ||||||
| 				files = append(files, v) |  | ||||||
| 			} |  | ||||||
| 			contents, err = s.snapshotter.TakeSnapshot(files) |  | ||||||
| 		} |  | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
|  | 
 | ||||||
| 		ck, err := compositeKey.Hash() | 		ck, err := compositeKey.Hash() | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
| 		if err := s.saveSnapshot(command.String(), ck, contents); err != nil { | 		// Push layer to cache (in parallel) now along with new config file
 | ||||||
|  | 		if s.opts.Cache && command.ShouldCacheOutput() { | ||||||
|  | 			cacheGroup.Go(func() error { | ||||||
|  | 				return pushLayerToCache(s.opts, ck, tarPath, command.String()) | ||||||
|  | 			}) | ||||||
|  | 		} | ||||||
|  | 		if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil { | ||||||
| 			return err | 			return err | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
|  | 	if err := cacheGroup.Wait(); err != nil { | ||||||
|  | 		logrus.Warnf("error uploading layer to cache: %s", err) | ||||||
|  | 	} | ||||||
| 	return nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | func (s *stageBuilder) takeSnapshot(files []string) (string, error) { | ||||||
|  | 	if files == nil || s.opts.SingleSnapshot { | ||||||
|  | 		return s.snapshotter.TakeSnapshotFS() | ||||||
|  | 	} | ||||||
|  | 	// Volumes are very weird. They get created in their command, but snapshotted in the next one.
 | ||||||
|  | 	// Add them to the list of files to snapshot.
 | ||||||
|  | 	for v := range s.cf.Config.Volumes { | ||||||
|  | 		files = append(files, v) | ||||||
|  | 	} | ||||||
|  | 	return s.snapshotter.TakeSnapshot(files) | ||||||
|  | } | ||||||
|  | 
 | ||||||
| func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool { | func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool { | ||||||
| 	isLastCommand := index == len(s.stage.Commands)-1 | 	isLastCommand := index == len(s.stage.Commands)-1 | ||||||
| 
 | 
 | ||||||
|  | @ -215,24 +283,22 @@ func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool { | ||||||
| 	return true | 	return true | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (s *stageBuilder) saveSnapshot(createdBy string, ck string, contents []byte) error { | func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error { | ||||||
| 	if contents == nil { | 	if tarPath == "" { | ||||||
| 		logrus.Info("No files were changed, appending empty layer to config. No layer added to image.") |  | ||||||
| 		return nil | 		return nil | ||||||
| 	} | 	} | ||||||
| 	// Append the layer to the image
 | 	fi, err := os.Stat(tarPath) | ||||||
| 	opener := func() (io.ReadCloser, error) { |  | ||||||
| 		return ioutil.NopCloser(bytes.NewReader(contents)), nil |  | ||||||
| 	} |  | ||||||
| 	layer, err := tarball.LayerFromOpener(opener) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 	// Push layer to cache now along with new config file
 | 	if fi.Size() <= emptyTarSize { | ||||||
| 	if s.opts.Cache { | 		logrus.Info("No files were changed, appending empty layer to config. No layer added to image.") | ||||||
| 		if err := pushLayerToCache(s.opts, ck, layer, createdBy); err != nil { | 		return nil | ||||||
| 			return err | 	} | ||||||
| 		} | 
 | ||||||
|  | 	layer, err := tarball.LayerFromFile(tarPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
| 	} | 	} | ||||||
| 	s.image, err = mutate.Append(s.image, | 	s.image, err = mutate.Append(s.image, | ||||||
| 		mutate.Addendum{ | 		mutate.Addendum{ | ||||||
|  | @ -306,7 +372,7 @@ func extractImageToDependecyDir(index int, image v1.Image) error { | ||||||
| 	if err := os.MkdirAll(dependencyDir, 0755); err != nil { | 	if err := os.MkdirAll(dependencyDir, 0755); err != nil { | ||||||
| 		return err | 		return err | ||||||
| 	} | 	} | ||||||
| 	logrus.Infof("trying to extract to %s", dependencyDir) | 	logrus.Debugf("trying to extract to %s", dependencyDir) | ||||||
| 	_, err := util.GetFSFromImage(dependencyDir, image) | 	_, err := util.GetFSFromImage(dependencyDir, image) | ||||||
| 	return err | 	return err | ||||||
| } | } | ||||||
|  |  | ||||||
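
One detail from the build.go changes above: saveSnapshotToImage now skips any snapshot tarball no larger than emptyTarSize (1024 bytes), because a Go archive/tar writer that is closed without writing any entries emits only the two 512-byte end-of-archive blocks. A quick self-contained check of that number:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	// No entries written: Close emits only the archive trailer
	// (two 512-byte zero blocks).
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 1024
}
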
|  | @ -66,7 +66,7 @@ func Test_reviewConfig(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func stage(t *testing.T, d string) config.KanikoStage { | func stage(t *testing.T, d string) config.KanikoStage { | ||||||
| 	stages, err := dockerfile.Parse([]byte(d)) | 	stages, _, err := dockerfile.Parse([]byte(d)) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("error parsing dockerfile: %v", err) | 		t.Fatalf("error parsing dockerfile: %v", err) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
|  | @ -107,7 +107,11 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error { | ||||||
| 
 | 
 | ||||||
| // pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
 | // pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
 | ||||||
| // if opts.Cache doesn't exist, infer the cache from the given destination
 | // if opts.Cache doesn't exist, infer the cache from the given destination
 | ||||||
| func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, layer v1.Layer, createdBy string) error { | func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, tarPath string, createdBy string) error { | ||||||
|  | 	layer, err := tarball.LayerFromFile(tarPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		return err | ||||||
|  | 	} | ||||||
| 	cache, err := cache.Destination(opts, cacheKey) | 	cache, err := cache.Destination(opts, cacheKey) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return errors.Wrap(err, "getting cache destination") | 		return errors.Wrap(err, "getting cache destination") | ||||||
|  | @ -126,7 +130,7 @@ func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, layer v1.Laye | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return errors.Wrap(err, "appending layer onto empty image") | 		return errors.Wrap(err, "appending layer onto empty image") | ||||||
| 	} | 	} | ||||||
| 	return DoPush(empty, &config.KanikoOptions{ | 	cacheOpts := *opts | ||||||
| 		Destinations: []string{cache}, | 	cacheOpts.Destinations = []string{cache} | ||||||
| 	}) | 	return DoPush(empty, &cacheOpts) | ||||||
| } | } | ||||||
|  |  | ||||||
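
The push.go change above keeps the caller's registry options intact when pushing cache layers: instead of building a fresh KanikoOptions with only Destinations set, the existing options struct is copied and just the destination is overridden (the "Preserve options when doing a cache push" changelog entry). A minimal sketch of the pattern with a hypothetical options type:

package main

import "fmt"

// options is a stand-in for config.KanikoOptions; the field names here are hypothetical.
type options struct {
	Destinations  []string
	InsecurePush  bool
	SkipTLSVerify bool
}

func main() {
	opts := options{InsecurePush: true, SkipTLSVerify: true}

	// Copy everything, then override only the destination for the cache push.
	cacheOpts := opts
	cacheOpts.Destinations = []string{"gcr.io/example/cache:cachekey"}

	fmt.Printf("%+v\n", cacheOpts) // the insecure/TLS settings survive the copy
}
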
|  | @ -92,13 +92,13 @@ func (l *LayeredMap) GetWhiteout(s string) (string, bool) { | ||||||
| 	return "", false | 	return "", false | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (l *LayeredMap) MaybeAddWhiteout(s string) (bool, error) { | func (l *LayeredMap) MaybeAddWhiteout(s string) bool { | ||||||
| 	whiteout, ok := l.GetWhiteout(s) | 	whiteout, ok := l.GetWhiteout(s) | ||||||
| 	if ok && whiteout == s { | 	if ok && whiteout == s { | ||||||
| 		return false, nil | 		return false | ||||||
| 	} | 	} | ||||||
| 	l.whiteouts[len(l.whiteouts)-1][s] = s | 	l.whiteouts[len(l.whiteouts)-1][s] = s | ||||||
| 	return true, nil | 	return true | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Add will add the specified file s to the layered map.
 | // Add will add the specified file s to the layered map.
 | ||||||
|  |  | ||||||
|  | @ -17,18 +17,21 @@ limitations under the License. | ||||||
| package snapshot | package snapshot | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"bytes" |  | ||||||
| 	"fmt" | 	"fmt" | ||||||
| 	"io" |  | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"os" | 	"os" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
| 	"syscall" | 	"syscall" | ||||||
| 
 | 
 | ||||||
|  | 	"github.com/GoogleContainerTools/kaniko/pkg/constants" | ||||||
|  | 
 | ||||||
| 	"github.com/GoogleContainerTools/kaniko/pkg/util" | 	"github.com/GoogleContainerTools/kaniko/pkg/util" | ||||||
| 	"github.com/sirupsen/logrus" | 	"github.com/sirupsen/logrus" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
|  | // For testing
 | ||||||
|  | var snapshotPathPrefix = constants.KanikoDir | ||||||
|  | 
 | ||||||
| // Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken
 | // Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken
 | ||||||
| type Snapshotter struct { | type Snapshotter struct { | ||||||
| 	l         *LayeredMap | 	l         *LayeredMap | ||||||
|  | @ -42,10 +45,8 @@ func NewSnapshotter(l *LayeredMap, d string) *Snapshotter { | ||||||
| 
 | 
 | ||||||
| // Init initializes a new snapshotter
 | // Init initializes a new snapshotter
 | ||||||
| func (s *Snapshotter) Init() error { | func (s *Snapshotter) Init() error { | ||||||
| 	if _, err := s.snapShotFS(ioutil.Discard); err != nil { | 	_, err := s.TakeSnapshotFS() | ||||||
| 		return err | 	return err | ||||||
| 	} |  | ||||||
| 	return nil |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // Key returns a string based on the current state of the file system
 | // Key returns a string based on the current state of the file system
 | ||||||
|  | @ -55,46 +56,21 @@ func (s *Snapshotter) Key() (string, error) { | ||||||
| 
 | 
 | ||||||
| // TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
 | // TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
 | ||||||
| // a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
 | // a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
 | ||||||
| func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) { | func (s *Snapshotter) TakeSnapshot(files []string) (string, error) { | ||||||
| 	buf := bytes.NewBuffer([]byte{}) | 	f, err := ioutil.TempFile(snapshotPathPrefix, "") | ||||||
| 	filesAdded, err := s.snapshotFiles(buf, files) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return "", err | ||||||
| 	} | 	} | ||||||
| 	contents := buf.Bytes() | 	defer f.Close() | ||||||
| 	if !filesAdded { |  | ||||||
| 		return nil, nil |  | ||||||
| 	} |  | ||||||
| 	return contents, err |  | ||||||
| } |  | ||||||
| 
 | 
 | ||||||
| // TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
 |  | ||||||
| // a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
 |  | ||||||
| func (s *Snapshotter) TakeSnapshotFS() ([]byte, error) { |  | ||||||
| 	buf := bytes.NewBuffer([]byte{}) |  | ||||||
| 	filesAdded, err := s.snapShotFS(buf) |  | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err |  | ||||||
| 	} |  | ||||||
| 	contents := buf.Bytes() |  | ||||||
| 	if !filesAdded { |  | ||||||
| 		return nil, nil |  | ||||||
| 	} |  | ||||||
| 	return contents, err |  | ||||||
| } |  | ||||||
| 
 |  | ||||||
| // snapshotFiles creates a snapshot (tar) and adds the specified files.
 |  | ||||||
| // It will not add files which are whitelisted.
 |  | ||||||
| func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) { |  | ||||||
| 	s.l.Snapshot() | 	s.l.Snapshot() | ||||||
| 	if len(files) == 0 { | 	if len(files) == 0 { | ||||||
| 		logrus.Info("No files changed in this command, skipping snapshotting.") | 		logrus.Info("No files changed in this command, skipping snapshotting.") | ||||||
| 		return false, nil | 		return "", nil | ||||||
| 	} | 	} | ||||||
| 	logrus.Info("Taking snapshot of files...") | 	logrus.Info("Taking snapshot of files...") | ||||||
| 	logrus.Debugf("Taking snapshot of files %v", files) | 	logrus.Debugf("Taking snapshot of files %v", files) | ||||||
| 	snapshottedFiles := make(map[string]bool) | 	snapshottedFiles := make(map[string]bool) | ||||||
| 	filesAdded := false |  | ||||||
| 
 | 
 | ||||||
| 	t := util.NewTar(f) | 	t := util.NewTar(f) | ||||||
| 	defer t.Close() | 	defer t.Close() | ||||||
|  | @ -114,15 +90,14 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) { | ||||||
| 
 | 
 | ||||||
| 		fileAdded, err := s.l.MaybeAdd(file) | 		fileAdded, err := s.l.MaybeAdd(file) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return false, fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err) | 			return "", fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if fileAdded { | 		if fileAdded { | ||||||
| 			err = t.AddFileToTar(file) | 			err = t.AddFileToTar(file) | ||||||
| 			if err != nil { | 			if err != nil { | ||||||
| 				return false, fmt.Errorf("Error adding parent dir %s to tar: %s", file, err) | 				return "", fmt.Errorf("Error adding parent dir %s to tar: %s", file, err) | ||||||
| 			} | 			} | ||||||
| 			filesAdded = true |  | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	// Next add the files themselves to the tar
 | 	// Next add the files themselves to the tar
 | ||||||
|  | @ -134,21 +109,26 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) { | ||||||
| 		snapshottedFiles[file] = true | 		snapshottedFiles[file] = true | ||||||
| 
 | 
 | ||||||
| 		if err := s.l.Add(file); err != nil { | 		if err := s.l.Add(file); err != nil { | ||||||
| 			return false, fmt.Errorf("Unable to add file %s to layered map: %s", file, err) | 			return "", fmt.Errorf("Unable to add file %s to layered map: %s", file, err) | ||||||
| 		} | 		} | ||||||
| 		if err := t.AddFileToTar(file); err != nil { | 		if err := t.AddFileToTar(file); err != nil { | ||||||
| 			return false, fmt.Errorf("Error adding file %s to tar: %s", file, err) | 			return "", fmt.Errorf("Error adding file %s to tar: %s", file, err) | ||||||
| 		} | 		} | ||||||
| 		filesAdded = true |  | ||||||
| 	} | 	} | ||||||
| 	return filesAdded, nil | 	return f.Name(), nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // shapShotFS creates a snapshot (tar) of all files in the system which are not
 | // TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
 | ||||||
| // whitelisted and which have changed.
 | // a tarball of the changed files.
 | ||||||
| func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { | func (s *Snapshotter) TakeSnapshotFS() (string, error) { | ||||||
| 	logrus.Info("Taking snapshot of full filesystem...") | 	logrus.Info("Taking snapshot of full filesystem...") | ||||||
| 
 | 
 | ||||||
|  | 	f, err := ioutil.TempFile(snapshotPathPrefix, "") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", err | ||||||
|  | 	} | ||||||
|  | 	defer f.Close() | ||||||
|  | 
 | ||||||
| 	// Some of the operations that follow (e.g. hashing) depend on the file system being synced,
 | 	// Some of the operations that follow (e.g. hashing) depend on the file system being synced,
 | ||||||
| 	// for example the hashing function that determines if files are equal uses the mtime of the files,
 | 	// for example the hashing function that determines if files are equal uses the mtime of the files,
 | ||||||
| 	// which can lag if sync is not called. Unfortunately there can still be lag if too much data needs
 | 	// which can lag if sync is not called. Unfortunately there can still be lag if too much data needs
 | ||||||
|  | @ -157,7 +137,6 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { | ||||||
| 
 | 
 | ||||||
| 	s.l.Snapshot() | 	s.l.Snapshot() | ||||||
| 	existingPaths := s.l.GetFlattenedPathsForWhiteOut() | 	existingPaths := s.l.GetFlattenedPathsForWhiteOut() | ||||||
| 	filesAdded := false |  | ||||||
| 	t := util.NewTar(f) | 	t := util.NewTar(f) | ||||||
| 	defer t.Close() | 	defer t.Close() | ||||||
| 
 | 
 | ||||||
|  | @ -176,15 +155,10 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { | ||||||
| 		// Only add the whiteout if the directory for the file still exists.
 | 		// Only add the whiteout if the directory for the file still exists.
 | ||||||
| 		dir := filepath.Dir(path) | 		dir := filepath.Dir(path) | ||||||
| 		if _, ok := memFs[dir]; ok { | 		if _, ok := memFs[dir]; ok { | ||||||
| 			addWhiteout, err := s.l.MaybeAddWhiteout(path) | 			if s.l.MaybeAddWhiteout(path) { | ||||||
| 			if err != nil { |  | ||||||
| 				return false, nil |  | ||||||
| 			} |  | ||||||
| 			if addWhiteout { |  | ||||||
| 				logrus.Infof("Adding whiteout for %s", path) | 				logrus.Infof("Adding whiteout for %s", path) | ||||||
| 				filesAdded = true |  | ||||||
| 				if err := t.Whiteout(path); err != nil { | 				if err := t.Whiteout(path); err != nil { | ||||||
| 					return false, err | 					return "", err | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -194,7 +168,7 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { | ||||||
| 	for path := range memFs { | 	for path := range memFs { | ||||||
| 		whitelisted, err := util.CheckWhitelist(path) | 		whitelisted, err := util.CheckWhitelist(path) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return false, err | 			return "", err | ||||||
| 		} | 		} | ||||||
| 		if whitelisted { | 		if whitelisted { | ||||||
| 			logrus.Debugf("Not adding %s to layer, as it's whitelisted", path) | 			logrus.Debugf("Not adding %s to layer, as it's whitelisted", path) | ||||||
|  | @ -204,16 +178,15 @@ func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { | ||||||
| 		// Only add to the tar if we add it to the layeredmap.
 | 		// Only add to the tar if we add it to the layeredmap.
 | ||||||
| 		maybeAdd, err := s.l.MaybeAdd(path) | 		maybeAdd, err := s.l.MaybeAdd(path) | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return false, err | 			return "", err | ||||||
| 		} | 		} | ||||||
| 		if maybeAdd { | 		if maybeAdd { | ||||||
| 			logrus.Debugf("Adding %s to layer, because it was changed.", path) | 			logrus.Debugf("Adding %s to layer, because it was changed.", path) | ||||||
| 			filesAdded = true |  | ||||||
| 			if err := t.AddFileToTar(path); err != nil { | 			if err := t.AddFileToTar(path); err != nil { | ||||||
| 				return false, err | 				return "", err | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return filesAdded, nil | 	return f.Name(), nil | ||||||
| } | } | ||||||
|  |  | ||||||
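
The snapshot.go rewrite above is the "buffering for large layers" changelog item: snapshots no longer accumulate in a bytes.Buffer but stream into a temp file under the kaniko directory, and callers get back a path they can hand to tarball.LayerFromFile. A minimal sketch of that pattern; the helper name and header handling are illustrative, not kaniko's code:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// writeSnapshot streams the named files into a tarball on disk and returns
// its path, so a large layer never has to fit in memory.
func writeSnapshot(dir string, files []string) (string, error) {
	out, err := ioutil.TempFile(dir, "")
	if err != nil {
		return "", err
	}
	defer out.Close()

	tw := tar.NewWriter(out)
	defer tw.Close()

	for _, name := range files {
		in, err := os.Open(name)
		if err != nil {
			return "", err
		}
		fi, err := in.Stat()
		if err != nil {
			in.Close()
			return "", err
		}
		hdr, err := tar.FileInfoHeader(fi, "")
		if err != nil {
			in.Close()
			return "", err
		}
		hdr.Name = name
		if err := tw.WriteHeader(hdr); err != nil {
			in.Close()
			return "", err
		}
		if _, err := io.Copy(tw, in); err != nil {
			in.Close()
			return "", err
		}
		in.Close()
	}
	return out.Name(), nil
}

func main() {
	path, err := writeSnapshot(os.TempDir(), []string{"/etc/hostname"})
	if err != nil {
		panic(err)
	}
	fmt.Println("snapshot written to", path)
}
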
|  | @ -17,7 +17,6 @@ package snapshot | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"archive/tar" | 	"archive/tar" | ||||||
| 	"bytes" |  | ||||||
| 	"io" | 	"io" | ||||||
| 	"io/ioutil" | 	"io/ioutil" | ||||||
| 	"os" | 	"os" | ||||||
|  | @ -30,9 +29,8 @@ import ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func TestSnapshotFSFileChange(t *testing.T) { | func TestSnapshotFSFileChange(t *testing.T) { | ||||||
| 
 | 	testDir, snapshotter, cleanup, err := setUpTestDir() | ||||||
| 	testDir, snapshotter, err := setUpTestDir() | 	defer cleanup() | ||||||
| 	defer os.RemoveAll(testDir) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | @ -45,16 +43,17 @@ func TestSnapshotFSFileChange(t *testing.T) { | ||||||
| 		t.Fatalf("Error setting up fs: %s", err) | 		t.Fatalf("Error setting up fs: %s", err) | ||||||
| 	} | 	} | ||||||
| 	// Take another snapshot
 | 	// Take another snapshot
 | ||||||
| 	contents, err := snapshotter.TakeSnapshotFS() | 	tarPath, err := snapshotter.TakeSnapshotFS() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("Error taking snapshot of fs: %s", err) | 		t.Fatalf("Error taking snapshot of fs: %s", err) | ||||||
| 	} | 	} | ||||||
| 	if contents == nil { | 
 | ||||||
| 		t.Fatal("No files added to snapshot.") | 	f, err := os.Open(tarPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
| 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | ||||||
| 	reader := bytes.NewReader(contents) | 	tr := tar.NewReader(f) | ||||||
| 	tr := tar.NewReader(reader) |  | ||||||
| 	fooPath := filepath.Join(testDir, "foo") | 	fooPath := filepath.Join(testDir, "foo") | ||||||
| 	batPath := filepath.Join(testDir, "bar/bat") | 	batPath := filepath.Join(testDir, "bar/bat") | ||||||
| 	snapshotFiles := map[string]string{ | 	snapshotFiles := map[string]string{ | ||||||
|  | @ -82,8 +81,8 @@ func TestSnapshotFSFileChange(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestSnapshotFSChangePermissions(t *testing.T) { | func TestSnapshotFSChangePermissions(t *testing.T) { | ||||||
| 	testDir, snapshotter, err := setUpTestDir() | 	testDir, snapshotter, cleanup, err := setUpTestDir() | ||||||
| 	defer os.RemoveAll(testDir) | 	defer cleanup() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | @ -93,16 +92,16 @@ func TestSnapshotFSChangePermissions(t *testing.T) { | ||||||
| 		t.Fatalf("Error changing permissions on %s: %v", batPath, err) | 		t.Fatalf("Error changing permissions on %s: %v", batPath, err) | ||||||
| 	} | 	} | ||||||
| 	// Take another snapshot
 | 	// Take another snapshot
 | ||||||
| 	contents, err := snapshotter.TakeSnapshotFS() | 	tarPath, err := snapshotter.TakeSnapshotFS() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("Error taking snapshot of fs: %s", err) | 		t.Fatalf("Error taking snapshot of fs: %s", err) | ||||||
| 	} | 	} | ||||||
| 	if contents == nil { | 	f, err := os.Open(tarPath) | ||||||
| 		t.Fatal("No files added to snapshot.") | 	if err != nil { | ||||||
|  | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
| 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | ||||||
| 	reader := bytes.NewReader(contents) | 	tr := tar.NewReader(f) | ||||||
| 	tr := tar.NewReader(reader) |  | ||||||
| 	snapshotFiles := map[string]string{ | 	snapshotFiles := map[string]string{ | ||||||
| 		batPath: "baz2", | 		batPath: "baz2", | ||||||
| 	} | 	} | ||||||
|  | @ -127,8 +126,8 @@ func TestSnapshotFSChangePermissions(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestSnapshotFiles(t *testing.T) { | func TestSnapshotFiles(t *testing.T) { | ||||||
| 	testDir, snapshotter, err := setUpTestDir() | 	testDir, snapshotter, cleanup, err := setUpTestDir() | ||||||
| 	defer os.RemoveAll(testDir) | 	defer cleanup() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | @ -142,15 +141,20 @@ func TestSnapshotFiles(t *testing.T) { | ||||||
| 	filesToSnapshot := []string{ | 	filesToSnapshot := []string{ | ||||||
| 		filepath.Join(testDir, "foo"), | 		filepath.Join(testDir, "foo"), | ||||||
| 	} | 	} | ||||||
| 	contents, err := snapshotter.TakeSnapshot(filesToSnapshot) | 	tarPath, err := snapshotter.TakeSnapshot(filesToSnapshot) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | 	defer os.Remove(tarPath) | ||||||
|  | 
 | ||||||
| 	expectedFiles := []string{"/", "/tmp", filepath.Join(testDir, "foo")} | 	expectedFiles := []string{"/", "/tmp", filepath.Join(testDir, "foo")} | ||||||
| 
 | 
 | ||||||
|  | 	f, err := os.Open(tarPath) | ||||||
|  | 	if err != nil { | ||||||
|  | 		t.Fatal(err) | ||||||
|  | 	} | ||||||
| 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | 	// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
 | ||||||
| 	reader := bytes.NewReader(contents) | 	tr := tar.NewReader(f) | ||||||
| 	tr := tar.NewReader(reader) |  | ||||||
| 	var actualFiles []string | 	var actualFiles []string | ||||||
| 	for { | 	for { | ||||||
| 		hdr, err := tr.Next() | 		hdr, err := tr.Next() | ||||||
|  | @ -166,27 +170,42 @@ func TestSnapshotFiles(t *testing.T) { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func TestEmptySnapshotFS(t *testing.T) { | func TestEmptySnapshotFS(t *testing.T) { | ||||||
| 	testDir, snapshotter, err := setUpTestDir() | 	_, snapshotter, cleanup, err := setUpTestDir() | ||||||
| 	defer os.RemoveAll(testDir) |  | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatal(err) | 		t.Fatal(err) | ||||||
| 	} | 	} | ||||||
|  | 	defer cleanup() | ||||||
|  | 
 | ||||||
| 	// Take snapshot with no changes
 | 	// Take snapshot with no changes
 | ||||||
| 	contents, err := snapshotter.TakeSnapshotFS() | 	tarPath, err := snapshotter.TakeSnapshotFS() | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("Error taking snapshot of fs: %s", err) | 		t.Fatalf("Error taking snapshot of fs: %s", err) | ||||||
| 	} | 	} | ||||||
| 	// Since we took a snapshot with no changes, contents should be nil
 | 
 | ||||||
| 	if contents != nil { | 	f, err := os.Open(tarPath) | ||||||
| 		t.Fatal("Files added even though no changes to file system were made.") | 	if err != nil { | ||||||
|  | 		t.Fatal(err) | ||||||
|  | 	} | ||||||
|  | 	tr := tar.NewReader(f) | ||||||
|  | 
 | ||||||
|  | 	if _, err := tr.Next(); err != io.EOF { | ||||||
|  | 		t.Fatal("no files expected in tar, found files.") | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func setUpTestDir() (string, *Snapshotter, error) { | func setUpTestDir() (string, *Snapshotter, func(), error) { | ||||||
| 	testDir, err := ioutil.TempDir("", "") | 	testDir, err := ioutil.TempDir("", "") | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return testDir, nil, errors.Wrap(err, "setting up temp dir") | 		return "", nil, nil, errors.Wrap(err, "setting up temp dir") | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	snapshotPath, err := ioutil.TempDir("", "") | ||||||
|  | 	if err != nil { | ||||||
|  | 		return "", nil, nil, errors.Wrap(err, "setting up temp dir") | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	snapshotPathPrefix = snapshotPath | ||||||
|  | 
 | ||||||
| 	files := map[string]string{ | 	files := map[string]string{ | ||||||
| 		"foo":         "baz1", | 		"foo":         "baz1", | ||||||
| 		"bar/bat":     "baz2", | 		"bar/bat":     "baz2", | ||||||
|  | @ -194,14 +213,20 @@ func setUpTestDir() (string, *Snapshotter, error) { | ||||||
| 	} | 	} | ||||||
| 	// Set up initial files
 | 	// Set up initial files
 | ||||||
| 	if err := testutil.SetupFiles(testDir, files); err != nil { | 	if err := testutil.SetupFiles(testDir, files); err != nil { | ||||||
| 		return testDir, nil, errors.Wrap(err, "setting up file system") | 		return "", nil, nil, errors.Wrap(err, "setting up file system") | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Take the initial snapshot
 | 	// Take the initial snapshot
 | ||||||
| 	l := NewLayeredMap(util.Hasher(), util.CacheHasher()) | 	l := NewLayeredMap(util.Hasher(), util.CacheHasher()) | ||||||
| 	snapshotter := NewSnapshotter(l, testDir) | 	snapshotter := NewSnapshotter(l, testDir) | ||||||
| 	if err := snapshotter.Init(); err != nil { | 	if err := snapshotter.Init(); err != nil { | ||||||
| 		return testDir, nil, errors.Wrap(err, "initializing snapshotter") | 		return "", nil, nil, errors.Wrap(err, "initializing snapshotter") | ||||||
| 	} | 	} | ||||||
| 	return testDir, snapshotter, nil | 
 | ||||||
|  | 	cleanup := func() { | ||||||
|  | 		os.RemoveAll(snapshotPath) | ||||||
|  | 		os.RemoveAll(testDir) | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	return testDir, snapshotter, cleanup, nil | ||||||
| } | } | ||||||
|  |  | ||||||
|  | @ -62,8 +62,7 @@ var whitelist = []WhitelistEntry{ | ||||||
| // GetFSFromImage extracts the layers of img to root
 | // GetFSFromImage extracts the layers of img to root
 | ||||||
| // It returns a list of all files extracted
 | // It returns a list of all files extracted
 | ||||||
| func GetFSFromImage(root string, img v1.Image) ([]string, error) { | func GetFSFromImage(root string, img v1.Image) ([]string, error) { | ||||||
| 	whitelist, err := fileSystemWhitelist(constants.WhitelistPath) | 	if err := DetectFilesystemWhitelist(constants.WhitelistPath); err != nil { | ||||||
| 	if err != nil { |  | ||||||
| 		return nil, err | 		return nil, err | ||||||
| 	} | 	} | ||||||
| 	logrus.Debugf("Mounted directories: %v", whitelist) | 	logrus.Debugf("Mounted directories: %v", whitelist) | ||||||
|  | @ -74,7 +73,7 @@ func GetFSFromImage(root string, img v1.Image) ([]string, error) { | ||||||
| 	extractedFiles := []string{} | 	extractedFiles := []string{} | ||||||
| 
 | 
 | ||||||
| 	for i, l := range layers { | 	for i, l := range layers { | ||||||
| 		logrus.Infof("Extracting layer %d", i) | 		logrus.Debugf("Extracting layer %d", i) | ||||||
| 		r, err := l.Uncompressed() | 		r, err := l.Uncompressed() | ||||||
| 		if err != nil { | 		if err != nil { | ||||||
| 			return nil, err | 			return nil, err | ||||||
|  | @ -303,10 +302,10 @@ func checkWhitelistRoot(root string) bool { | ||||||
| // (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
 | // (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
 | ||||||
| // Where (5) is the mount point relative to the process's root
 | // Where (5) is the mount point relative to the process's root
 | ||||||
| // From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
 | // From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
 | ||||||
| func fileSystemWhitelist(path string) ([]WhitelistEntry, error) { | func DetectFilesystemWhitelist(path string) error { | ||||||
| 	f, err := os.Open(path) | 	f, err := os.Open(path) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return err | ||||||
| 	} | 	} | ||||||
| 	defer f.Close() | 	defer f.Close() | ||||||
| 	reader := bufio.NewReader(f) | 	reader := bufio.NewReader(f) | ||||||
|  | @ -314,7 +313,7 @@ func fileSystemWhitelist(path string) ([]WhitelistEntry, error) { | ||||||
| 		line, err := reader.ReadString('\n') | 		line, err := reader.ReadString('\n') | ||||||
| 		logrus.Debugf("Read the following line from %s: %s", path, line) | 		logrus.Debugf("Read the following line from %s: %s", path, line) | ||||||
| 		if err != nil && err != io.EOF { | 		if err != nil && err != io.EOF { | ||||||
| 			return nil, err | 			return err | ||||||
| 		} | 		} | ||||||
| 		lineArr := strings.Split(line, " ") | 		lineArr := strings.Split(line, " ") | ||||||
| 		if len(lineArr) < 5 { | 		if len(lineArr) < 5 { | ||||||
|  | @ -336,7 +335,7 @@ func fileSystemWhitelist(path string) ([]WhitelistEntry, error) { | ||||||
| 			break | 			break | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	return whitelist, nil | 	return nil | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| // RelativeFiles returns a list of all files at the filepath relative to root
 | // RelativeFiles returns a list of all files at the filepath relative to root
 | ||||||
|  |  | ||||||
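
DetectFilesystemWhitelist (above) reads /proc/self/mountinfo and, following the field layout quoted in its comment, takes the fifth space-separated field of each line as a mount point to whitelist. A small sketch of that extraction, using the sample line from the kernel documentation:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Field layout (see the comment in fs_util.go):
	// (1)(2)(3) (4) (5) (6) ... where (5) is the mount point.
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"

	fields := strings.Split(line, " ")
	if len(fields) < 5 {
		fmt.Println("malformed mountinfo line")
		return
	}
	fmt.Println("whitelist entry:", fields[4]) // "/mnt2"
}
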
|  | @ -29,7 +29,7 @@ import ( | ||||||
| 	"github.com/GoogleContainerTools/kaniko/testutil" | 	"github.com/GoogleContainerTools/kaniko/testutil" | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| func Test_fileSystemWhitelist(t *testing.T) { | func Test_DetectFilesystemWhitelist(t *testing.T) { | ||||||
| 	testDir, err := ioutil.TempDir("", "") | 	testDir, err := ioutil.TempDir("", "") | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		t.Fatalf("Error creating tempdir: %s", err) | 		t.Fatalf("Error creating tempdir: %s", err) | ||||||
|  | @ -49,7 +49,7 @@ func Test_fileSystemWhitelist(t *testing.T) { | ||||||
| 		t.Fatalf("Error writing file contents to %s: %s", path, err) | 		t.Fatalf("Error writing file contents to %s: %s", path, err) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	actualWhitelist, err := fileSystemWhitelist(path) | 	err = DetectFilesystemWhitelist(path) | ||||||
| 	expectedWhitelist := []WhitelistEntry{ | 	expectedWhitelist := []WhitelistEntry{ | ||||||
| 		{"/kaniko", false}, | 		{"/kaniko", false}, | ||||||
| 		{"/proc", false}, | 		{"/proc", false}, | ||||||
|  | @ -59,6 +59,7 @@ func Test_fileSystemWhitelist(t *testing.T) { | ||||||
| 		{"/var/run", false}, | 		{"/var/run", false}, | ||||||
| 		{"/etc/mtab", false}, | 		{"/etc/mtab", false}, | ||||||
| 	} | 	} | ||||||
|  | 	actualWhitelist := whitelist | ||||||
| 	sort.Slice(actualWhitelist, func(i, j int) bool { | 	sort.Slice(actualWhitelist, func(i, j int) bool { | ||||||
| 		return actualWhitelist[i].Path < actualWhitelist[j].Path | 		return actualWhitelist[i].Path < actualWhitelist[j].Path | ||||||
| 	}) | 	}) | ||||||
|  |  | ||||||
|  | @ -18,6 +18,7 @@ package util | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
| 	"crypto/tls" | 	"crypto/tls" | ||||||
|  | 	"fmt" | ||||||
| 	"net/http" | 	"net/http" | ||||||
| 	"path/filepath" | 	"path/filepath" | ||||||
| 	"strconv" | 	"strconv" | ||||||
|  | @ -44,7 +45,13 @@ var ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| // RetrieveSourceImage returns the base image of the stage at index
 | // RetrieveSourceImage returns the base image of the stage at index
 | ||||||
| func RetrieveSourceImage(stage config.KanikoStage, buildArgs []string, opts *config.KanikoOptions) (v1.Image, error) { | func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (v1.Image, error) { | ||||||
|  | 	buildArgs := opts.BuildArgs | ||||||
|  | 	var metaArgsString []string | ||||||
|  | 	for _, arg := range stage.MetaArgs { | ||||||
|  | 		metaArgsString = append(metaArgsString, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString())) | ||||||
|  | 	} | ||||||
|  | 	buildArgs = append(buildArgs, metaArgsString...) | ||||||
| 	currentBaseName, err := ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false) | 	currentBaseName, err := ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		return nil, err | 		return nil, err | ||||||
|  |  | ||||||
|  | @ -57,7 +57,7 @@ func Test_StandardImage(t *testing.T) { | ||||||
| 	retrieveRemoteImage = mock | 	retrieveRemoteImage = mock | ||||||
| 	actual, err := RetrieveSourceImage(config.KanikoStage{ | 	actual, err := RetrieveSourceImage(config.KanikoStage{ | ||||||
| 		Stage: stages[0], | 		Stage: stages[0], | ||||||
| 	}, nil, &config.KanikoOptions{}) | 	}, &config.KanikoOptions{}) | ||||||
| 	testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual) | 	testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual) | ||||||
| } | } | ||||||
| func Test_ScratchImage(t *testing.T) { | func Test_ScratchImage(t *testing.T) { | ||||||
|  | @ -67,7 +67,7 @@ func Test_ScratchImage(t *testing.T) { | ||||||
| 	} | 	} | ||||||
| 	actual, err := RetrieveSourceImage(config.KanikoStage{ | 	actual, err := RetrieveSourceImage(config.KanikoStage{ | ||||||
| 		Stage: stages[1], | 		Stage: stages[1], | ||||||
| 	}, nil, &config.KanikoOptions{}) | 	}, &config.KanikoOptions{}) | ||||||
| 	expected := empty.Image | 	expected := empty.Image | ||||||
| 	testutil.CheckErrorAndDeepEqual(t, false, err, expected, actual) | 	testutil.CheckErrorAndDeepEqual(t, false, err, expected, actual) | ||||||
| } | } | ||||||
|  | @ -89,7 +89,7 @@ func Test_TarImage(t *testing.T) { | ||||||
| 		BaseImageStoredLocally: true, | 		BaseImageStoredLocally: true, | ||||||
| 		BaseImageIndex:         0, | 		BaseImageIndex:         0, | ||||||
| 		Stage:                  stages[2], | 		Stage:                  stages[2], | ||||||
| 	}, nil, &config.KanikoOptions{}) | 	}, &config.KanikoOptions{}) | ||||||
| 	testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual) | 	testutil.CheckErrorAndDeepEqual(t, false, err, nil, actual) | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  |  | ||||||