diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go
index 4071f957b..dab5917fa 100644
--- a/pkg/commands/copy.go
+++ b/pkg/commands/copy.go
@@ -161,6 +161,11 @@ func (c *CopyCommand) CacheCommand(img v1.Image) DockerCommand {
 	}
 }
 
+// From returns the stage referenced by --from in this COPY command, or "" if unset.
+func (c *CopyCommand) From() string {
+	return c.cmd.From
+}
+
 type CachingCopyCommand struct {
 	BaseCommand
 	img            v1.Image
@@ -187,6 +192,11 @@ func (cr *CachingCopyCommand) String() string {
 	return cr.cmd.String()
 }
 
+// From returns the stage referenced by --from in this cached COPY command, or "" if unset.
+func (cr *CachingCopyCommand) From() string {
+	return cr.cmd.From
+}
+
 func resolveIfSymlink(destPath string) (string, error) {
 	baseDir := filepath.Dir(destPath)
 	if info, err := os.Lstat(baseDir); err == nil {
diff --git a/pkg/config/stage.go b/pkg/config/stage.go
index 56c4a3f0f..fad18049c 100644
--- a/pkg/config/stage.go
+++ b/pkg/config/stage.go
@@ -16,7 +16,9 @@ limitations under the License.
 
 package config
 
-import "github.com/moby/buildkit/frontend/dockerfile/instructions"
+import (
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+)
 
 // KanikoStage wraps a stage of the Dockerfile and provides extra information
 type KanikoStage struct {
diff --git a/pkg/executor/build.go b/pkg/executor/build.go
index e3c48b5e1..47c456359 100644
--- a/pkg/executor/build.go
+++ b/pkg/executor/build.go
@@ -64,9 +64,6 @@ type stageBuilder struct {
 	stage           config.KanikoStage
 	image           v1.Image
 	cf              *v1.ConfigFile
-	snapshotter     snapShotter
-	layerCache      cache.LayerCache
-	pushCache       cachePusher
 	baseImageDigest string
 	finalCacheKey   string
 	opts            *config.KanikoOptions
@@ -74,6 +71,11 @@ type stageBuilder struct {
 	args                *dockerfile.BuildArgs
 	crossStageDeps      map[int][]string
 	digestToCacheKeyMap map[string]string
+	// digestMap maps a stage index to that stage's image digest (recorded in DoBuild).
+	digestMap           map[string]v1.Hash
+	snapshotter         snapShotter
+	layerCache          cache.LayerCache
+	pushCache           cachePusher
 }
 
 // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
@@ -146,6 +148,40 @@ func initializeConfig(img partial.WithConfigFile) (*v1.ConfigFile, error) {
 	return imageConfig, nil
 }
 
+func (s *stageBuilder) populateCompositeKey(command commands.DockerCommand, files []string, compositeKey CompositeCache) (CompositeCache, error) {
+	// Add the next command to the cache key.
+	compositeKey.AddKey(command.String())
+	// For a cross-stage COPY, mix the source stage's digest into the key so
+	// that a change in that stage invalidates this command's cache entry.
+	switch v := command.(type) {
+	case *commands.CopyCommand:
+		if v.From() != "" {
+			digest, ok := s.digestMap[v.From()]
+			if ok {
+				ds := digest.String()
+				logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
+				compositeKey.AddKey(ds)
+			}
+		}
+	case *commands.CachingCopyCommand:
+		if v.From() != "" {
+			digest, ok := s.digestMap[v.From()]
+			if ok {
+				ds := digest.String()
+				logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
+				compositeKey.AddKey(ds)
+			}
+		}
+	}
+
+	for _, f := range files {
+		if err := compositeKey.AddPath(f); err != nil {
+			return compositeKey, err
+		}
+	}
+	return compositeKey, nil
+}
+
 func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) error {
 	if !s.opts.Cache {
 		return nil
@@ -160,16 +196,14 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) error {
 		if command == nil {
 			continue
 		}
-		compositeKey.AddKey(command.String())
-		// If the command uses files from the context, add them.
 		files, err := command.FilesUsedFromContext(&cfg, s.args)
 		if err != nil {
 			return errors.Wrap(err, "failed to get files used from context")
 		}
-		for _, f := range files {
-			if err := compositeKey.AddPath(f); err != nil {
-				return errors.Wrap(err, "failed to add path to composite key")
-			}
+
+		compositeKey, err = s.populateCompositeKey(command, files, compositeKey)
+		if err != nil {
+			return err
 		}
 
 		ck, err := compositeKey.Hash()
@@ -256,19 +290,19 @@ func (s *stageBuilder) build() error {
 			continue
 		}
 
-		// Add the next command to the cache key.
-		compositeKey.AddKey(command.String())
-
 		t := timing.Start("Command: " + command.String())
+
+		// If the command uses files from the context, add them.
 		files, err := command.FilesUsedFromContext(&s.cf.Config, s.args)
 		if err != nil {
 			return errors.Wrap(err, "failed to get files used from context")
 		}
-		for _, f := range files {
-			if err := compositeKey.AddPath(f); err != nil {
-				return errors.Wrap(err, fmt.Sprintf("failed to add path to composite key %v", f))
-			}
+
+		*compositeKey, err = s.populateCompositeKey(command, files, *compositeKey)
+		if err != nil {
+			return err
 		}
+
 		logrus.Info(command.String())
 
 		if err := command.ExecuteCommand(&s.cf.Config, s.args); err != nil {
@@ -303,6 +337,7 @@ func (s *stageBuilder) build() error {
 	if err := cacheGroup.Wait(); err != nil {
 		logrus.Warnf("error uploading layer to cache: %s", err)
 	}
+
 	return nil
 }
 
@@ -374,7 +409,6 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) error {
 		},
 	)
 	return err
-
 }
 
 func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) {
@@ -443,6 +477,9 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error)
 func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 	t := timing.Start("Total Build Time")
 	digestToCacheKeyMap := make(map[string]string)
+	// Records each built stage's image digest by index; read by populateCompositeKey.
+	digestMap := make(map[string]v1.Hash)
+
 	// Parse dockerfile and unpack base image to root
 	stages, err := dockerfile.Stages(opts)
 	if err != nil {
@@ -470,6 +507,14 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		if err := sb.build(); err != nil {
 			return nil, errors.Wrap(err, "error building stage")
 		}
+
+		d, err := sb.image.Digest()
+		if err != nil {
+			return nil, err
+		}
+
+		digestMap[fmt.Sprintf("%d", sb.stage.Index)] = d
+
 		reviewConfig(stage, &sb.cf.Config)
 		sourceImage, err := mutate.Config(sb.image, sb.cf.Config)
 		if err != nil {
diff --git a/pkg/executor/build_test.go b/pkg/executor/build_test.go
index 66c0ab5b5..0b4a546f8 100644
--- a/pkg/executor/build_test.go
+++ b/pkg/executor/build_test.go
@@ -919,3 +919,52 @@ func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
 	}
 	return buf.Bytes()
 }
+
+// Test_stageBuilder_build exercises stageBuilder.build with caching enabled
+// and disabled, and with and without a cached layer available to retrieve.
+func Test_stageBuilder_build(t *testing.T) {
+	testCases := []struct {
+		opts     *config.KanikoOptions
+		retrieve bool
+	}{
+		{
+			opts: &config.KanikoOptions{Cache: true},
+		},
+		{
+			opts:     &config.KanikoOptions{Cache: true},
+			retrieve: true,
+		},
+		{
+			opts: &config.KanikoOptions{Cache: false},
+		},
+		{
+			opts:     &config.KanikoOptions{Cache: false},
+			retrieve: true,
+		},
+	}
+	for i, tc := range testCases {
+		t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) {
+			file, err := ioutil.TempFile("", "foo")
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			cf := &v1.ConfigFile{}
+			snap := fakeSnapShotter{file: file.Name()}
+			lc := fakeLayerCache{retrieve: tc.retrieve}
+			sb := &stageBuilder{opts: tc.opts, cf: cf, snapshotter: snap, layerCache: lc, pushCache: fakeCachePush}
+
+			command := MockDockerCommand{
+				contextFiles: []string{file.Name()},
+				cacheCommand: MockCachedDockerCommand{
+					contextFiles: []string{file.Name()},
+				},
+			}
+			sb.cmds = []commands.DockerCommand{command}
+			err = sb.build()
+			if err != nil {
+				t.Errorf("Expected error to be nil but was %v", err)
+			}
+		})
+	}
+}
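
What this change accomplishes: the cache key for a COPY --from=<stage> command now mixes in the digest of the image built for that stage, so editing an earlier stage invalidates the cached layers of every later stage that copies from it. Below is a minimal, self-contained sketch of that idea; it is not kaniko's actual CompositeCache (the real AddKey/AddPath/Hash live in pkg/executor), and the command string, digest values, and the key helper are illustrative only.

// Minimal model of the composite cache key: hash the command string plus,
// for cross-stage COPYs, the source stage's image digest.
package main

import (
	"crypto/sha256"
	"fmt"
)

type compositeKey struct{ keys []string }

// AddKey appends a string to the material that will be hashed.
func (c *compositeKey) AddKey(k string) { c.keys = append(c.keys, k) }

// Hash returns a stable hex digest of everything added so far.
func (c *compositeKey) Hash() string {
	h := sha256.New()
	for _, k := range c.keys {
		fmt.Fprintf(h, "%s\n", k)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// key builds the cache key for a COPY --from=0 command given the digest
	// recorded for stage 0, mirroring what populateCompositeKey does.
	key := func(stageDigest string) string {
		c := &compositeKey{}
		c.AddKey("COPY --from=0 /app /app") // command.String()
		c.AddKey(stageDigest)               // digest of the source stage
		return c.Hash()
	}

	unchanged := key("sha256:aaaa") // stage 0 rebuilt to the same digest
	changed := key("sha256:bbbb")   // stage 0 produced different content

	fmt.Println(key("sha256:aaaa") == unchanged) // true: cache hit preserved
	fmt.Println(unchanged == changed)            // false: dependent layer invalidated
}

Keying on the image digest rather than the stage's final cache key means dependent stages are invalidated exactly when the source stage's built image actually changes.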
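
One subtlety for reviewers: populateCompositeKey looks up s.digestMap[v.From()], while DoBuild keys the map by numeric stage index (fmt.Sprintf("%d", sb.stage.Index)). The two line up because kaniko rewrites named stage references (COPY --from=builder) to their indices while parsing the Dockerfile; the ok check then quietly skips --from values that reference an external image rather than a stage. A toy version of that resolution step, with hypothetical stage names and digests:

// Sketch of the name-to-index resolution assumed by the digestMap lookup;
// kaniko performs an equivalent rewrite at Dockerfile parse time.
package main

import "fmt"

func main() {
	stageNames := map[string]int{"builder": 0}         // FROM golang AS builder
	digestMap := map[string]string{"0": "sha256:aaaa"} // filled in by DoBuild

	for _, from := range []string{"builder", "debian:buster"} {
		if idx, ok := stageNames[from]; ok {
			from = fmt.Sprintf("%d", idx) // --from=builder becomes --from=0
		}
		if d, ok := digestMap[from]; ok {
			fmt.Printf("%s -> add %s to the composite key\n", from, d)
		} else {
			fmt.Printf("%s -> external image, no digest added\n", from)
		}
	}
}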