Use cachekey not digest for COPY --from src
* use the cachekey of the src stage rather than the digest for COPY --from commands, since cachekeys are reproducible while digests are not
* track digests to cache keys and stage indexes to digests (sketched below)
* add extra debug logging for troubleshooting cachekey building issues
* convert SHA256 hashes to hex-encoded strings rather than plain strings for easier human reading
parent 7b4b768edf
commit b19214ad1e
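For orientation, here is a minimal sketch of how the two new maps turn the stage index referenced by a COPY --from into a reproducible cache-key component. It is simplified from the diff below, and the digest and key values are hypothetical placeholders:

    package main

    import "fmt"

    func main() {
        // Populated by DoBuild after each stage is built (see the diff below):
        // stage index -> digest of the stage's image, digest -> the stage's final cache key.
        stageIdxToDigest := map[string]string{"0": "sha256:aaaa"}
        digestToCacheKey := map[string]string{"sha256:aaaa": "cachekey-of-stage-0"}

        // When a later stage runs `COPY --from=0 ...`, the reproducible cache key of
        // stage 0 is folded into the composite key instead of its digest.
        from := "0"
        if digest, ok := stageIdxToDigest[from]; ok {
            if cacheKey, ok := digestToCacheKey[digest]; ok {
                fmt.Println("adding to composite key:", cacheKey)
            }
        }
    }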
@@ -61,23 +61,24 @@ type snapShotter interface {
 // stageBuilder contains all fields necessary to build one stage of a Dockerfile
 type stageBuilder struct {
-    stage               config.KanikoStage
-    image               v1.Image
-    cf                  *v1.ConfigFile
-    baseImageDigest     string
-    finalCacheKey       string
-    opts                *config.KanikoOptions
-    cmds                []commands.DockerCommand
-    args                *dockerfile.BuildArgs
-    crossStageDeps      map[int][]string
-    digestToCacheKeyMap map[string]string
-    snapshotter         snapShotter
-    layerCache          cache.LayerCache
-    pushCache           cachePusher
+    stage            config.KanikoStage
+    image            v1.Image
+    cf               *v1.ConfigFile
+    baseImageDigest  string
+    finalCacheKey    string
+    opts             *config.KanikoOptions
+    cmds             []commands.DockerCommand
+    args             *dockerfile.BuildArgs
+    crossStageDeps   map[int][]string
+    digestToCacheKey map[string]string
+    stageIdxToDigest map[string]string
+    snapshotter      snapShotter
+    layerCache       cache.LayerCache
+    pushCache        cachePusher
 }
 
 // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
-func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string) (*stageBuilder, error) {
+func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string, sid map[string]string) (*stageBuilder, error) {
     sourceImage, err := util.RetrieveSourceImage(stage, opts)
     if err != nil {
         return nil, err
@@ -104,14 +105,15 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
         return nil, err
     }
     s := &stageBuilder{
-        stage:               stage,
-        image:               sourceImage,
-        cf:                  imageConfig,
-        snapshotter:         snapshotter,
-        baseImageDigest:     digest.String(),
-        opts:                opts,
-        crossStageDeps:      crossStageDeps,
-        digestToCacheKeyMap: dcm,
+        stage:            stage,
+        image:            sourceImage,
+        cf:               imageConfig,
+        snapshotter:      snapshotter,
+        baseImageDigest:  digest.String(),
+        opts:             opts,
+        crossStageDeps:   crossStageDeps,
+        digestToCacheKey: dcm,
+        stageIdxToDigest: sid,
         layerCache: &cache.RegistryCache{
             Opts: opts,
         },
@@ -146,28 +148,14 @@ func initializeConfig(img partial.WithConfigFile) (*v1.ConfigFile, error) {
     return imageConfig, nil
 }
 
-func (s *stageBuilder) populateCompositeKey(command commands.DockerCommand, files []string, compositeKey CompositeCache) (CompositeCache, error) {
+func (s *stageBuilder) populateCompositeKey(command fmt.Stringer, files []string, compositeKey CompositeCache) (CompositeCache, error) {
     // Add the next command to the cache key.
     compositeKey.AddKey(command.String())
     switch v := command.(type) {
     case *commands.CopyCommand:
-        if v.From() != "" {
-            digest, ok := s.digestMap[v.From()]
-            if ok {
-                ds := digest.String()
-                logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
-                compositeKey.AddKey(ds)
-            }
-        }
+        compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
     case *commands.CachingCopyCommand:
-        if v.From() != "" {
-            digest, ok := s.digestMap[v.From()]
-            if ok {
-                ds := digest.String()
-                logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
-                compositeKey.AddKey(ds)
-            }
-        }
+        compositeKey = s.populateCopyCmdCompositeKey(command, v.From(), compositeKey)
     }
 
     for _, f := range files {
@@ -178,6 +166,22 @@ func (s *stageBuilder) populateCompositeKey(command commands.DockerCommand, file
     return compositeKey, nil
 }
 
+func (s *stageBuilder) populateCopyCmdCompositeKey(command fmt.Stringer, from string, compositeKey CompositeCache) CompositeCache {
+    if from != "" {
+        digest, ok := s.stageIdxToDigest[from]
+        if ok {
+            ds := digest
+            cacheKey, ok := s.digestToCacheKey[ds]
+            if ok {
+                logrus.Debugf("adding digest %v from previous stage to composite key for %v", ds, command.String())
+                compositeKey.AddKey(cacheKey)
+            }
+        }
+    }
+
+    return compositeKey
+}
+
 func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) error {
     if !s.opts.Cache {
         return nil
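The new populateCopyCmdCompositeKey helper folds the source stage's cache key into the composite key, which is then hashed into the command's cache key. Below is a rough, self-contained sketch of that idea. It uses a stand-in type rather than kaniko's real CompositeCache (which also hashes file contents via AddPath), so treat it as an illustration of the key's shape, not the actual implementation:

    package main

    import (
        "crypto/sha256"
        "fmt"
        "strings"
    )

    // compositeCache is a stand-in for kaniko's CompositeCache: it accumulates
    // string keys and hashes them into a single hex-encoded cache key.
    type compositeCache struct{ keys []string }

    func (c *compositeCache) AddKey(k string) { c.keys = append(c.keys, k) }

    func (c *compositeCache) Hash() string {
        sum := sha256.Sum256([]byte(strings.Join(c.keys, "")))
        return fmt.Sprintf("%x", sum) // hex-encoded, matching the AddPath/HashDir change further down
    }

    func main() {
        c := &compositeCache{}
        c.AddKey("COPY --from=0 /app /app") // the command string
        c.AddKey("cachekey-of-stage-0")     // the source stage's cache key (hypothetical value)
        fmt.Println(c.Hash())               // identical inputs always produce the same key
    }

Because the key depends only on the build inputs and the upstream cache keys, rebuilding an identical stage yields the same value, which is the property image digests lack.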
@@ -202,10 +206,12 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
             return err
         }
 
+        logrus.Debugf("optimize: composite key for command %v %v", command.String(), compositeKey)
         ck, err := compositeKey.Hash()
         if err != nil {
             return errors.Wrap(err, "failed to hash composite key")
         }
+        logrus.Debugf("optimize: cache key for command %v %v", command.String(), ck)
         s.finalCacheKey = ck
         if command.ShouldCacheOutput() && !stopCache {
             img, err := s.layerCache.RetrieveLayer(ck)
@@ -236,7 +242,7 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
 func (s *stageBuilder) build() error {
     // Set the initial cache key to be the base image digest, the build args and the SrcContext.
     var compositeKey *CompositeCache
-    if cacheKey, ok := s.digestToCacheKeyMap[s.baseImageDigest]; ok {
+    if cacheKey, ok := s.digestToCacheKey[s.baseImageDigest]; ok {
         compositeKey = NewCompositeCache(cacheKey)
     } else {
         compositeKey = NewCompositeCache(s.baseImageDigest)
@@ -316,6 +322,7 @@ func (s *stageBuilder) build() error {
             return errors.Wrap(err, "failed to take snapshot")
         }
 
+        logrus.Debugf("build: composite key for command %v %v", command.String(), compositeKey)
         ck, err := compositeKey.Hash()
         if err != nil {
             return errors.Wrap(err, "failed to hash composite key")
@@ -472,7 +479,8 @@ func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error)
 // DoBuild executes building the Dockerfile
 func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
     t := timing.Start("Total Build Time")
-    digestToCacheKeyMap := make(map[string]string)
+    digestToCacheKey := make(map[string]string)
+    stageIdxToDigest := make(map[string]string)
 
     // Parse dockerfile and unpack base image to root
     stages, err := dockerfile.Stages(opts)
@@ -494,7 +502,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
     logrus.Infof("Built cross stage deps: %v", crossStageDependencies)
 
     for index, stage := range stages {
-        sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKeyMap)
+        sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest)
         if err != nil {
             return nil, err
         }
@@ -502,23 +510,24 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
             return nil, errors.Wrap(err, "error building stage")
         }
 
-        d, err := sb.image.Digest()
-        if err != nil {
-            return nil, err
-        }
-
-        digestMap[fmt.Sprintf("%d", sb.stage.Index)] = d
-
         reviewConfig(stage, &sb.cf.Config)
 
         sourceImage, err := mutate.Config(sb.image, sb.cf.Config)
         if err != nil {
            return nil, err
         }
-        digestToCacheKeyMap[d.String()] = sb.finalCacheKey
+
+        d, err := sourceImage.Digest()
+        if err != nil {
+            return nil, err
+        }
+
+        stageIdxToDigest[fmt.Sprintf("%d", sb.stage.Index)] = d.String()
+        logrus.Debugf("mapping stage idx %v to digest %v", sb.stage.Index, d.String())
+
+        digestToCacheKey[d.String()] = sb.finalCacheKey
+        logrus.Debugf("mapping digest %v to cachekey %v", d.String(), sb.finalCacheKey)
+
         if stage.Final {
             sourceImage, err = mutate.CreatedAt(sourceImage, v1.Time{Time: time.Now()})
             if err != nil {
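A small point worth calling out: digestToCacheKey and stageIdxToDigest are created once in DoBuild and handed to every newStageBuilder call. Since Go maps are reference types, entries recorded after stage N are automatically visible when stage N+1 resolves a COPY --from. A tiny illustration with hypothetical names and values:

    package main

    import "fmt"

    func record(m map[string]string, key, value string) { m[key] = value }

    func main() {
        // The same map value is shared by the caller and the callee.
        shared := map[string]string{}
        record(shared, "0", "sha256:aaaa") // hypothetical stage-0 entry
        fmt.Println(shared)                // the caller sees the entry: map[0:sha256:aaaa]
    }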
@@ -888,6 +888,7 @@ func tempDirAndFile(t *testing.T) (string, []string) {
 
     return dir, filenames
 }
 
 func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
     buf := bytes.NewBuffer([]byte{})
     writer := tar.NewWriter(buf)
@@ -919,51 +920,3 @@ func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
     }
     return buf.Bytes()
 }
-
-//=======
-// testCases := []struct {
-//     opts     *config.KanikoOptions
-//     retrieve bool
-// }{
-//     {
-//         opts: &config.KanikoOptions{Cache: true},
-//     },
-//     {
-//         opts:     &config.KanikoOptions{Cache: true},
-//         retrieve: true,
-//     },
-//     {
-//         opts: &config.KanikoOptions{Cache: false},
-//     },
-//     {
-//         opts:     &config.KanikoOptions{Cache: false},
-//         retrieve: true,
-//     },
-// }
-// for i, tc := range testCases {
-//     t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) {
-//         file, err := ioutil.TempFile("", "foo")
-//         if err != nil {
-//             t.Error(err)
-//         }
-
-//         cf := &v1.ConfigFile{}
-//         snap := fakeSnapShotter{file: file.Name()}
-//         lc := fakeLayerCache{retrieve: tc.retrieve}
-//         sb := &stageBuilder{opts: tc.opts, cf: cf, snapshotter: snap, layerCache: lc, pushCache: fakeCachePush}
-
-//         command := MockDockerCommand{
-//             contextFiles: []string{file.Name()},
-//             cacheCommand: MockCachedDockerCommand{
-//                 contextFiles: []string{file.Name()},
-//             },
-//         }
-//         sb.cmds = []commands.DockerCommand{command}
-//         err = sb.build()
-//         if err != nil {
-//             t.Errorf("Expected error to be nil but was %v", err)
-//         }
-//     })
-// }
-//}
-//>>>>>>> Add unit tests for stagebuilder build and optimize
@@ -18,6 +18,7 @@ package executor
 
 import (
     "crypto/sha256"
+    "fmt"
     "os"
     "path/filepath"
     "strings"
@@ -75,7 +76,7 @@ func (s *CompositeCache) AddPath(p string) error {
         return err
     }
 
-    s.keys = append(s.keys, string(sha.Sum(nil)))
+    s.keys = append(s.keys, fmt.Sprintf("%x", sha.Sum(nil)))
     return nil
 }
@@ -98,5 +99,5 @@ func HashDir(p string) (string, error) {
         return "", err
     }
 
-    return string(sha.Sum(nil)), nil
+    return fmt.Sprintf("%x", sha.Sum(nil)), nil
 }
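The final hunks replace string(sha.Sum(nil)), which coerces 32 raw bytes into a Go string, with hex encoding via fmt.Sprintf("%x", ...). A quick standalone illustration of the difference; the hashed input here is made up:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        sha := sha256.New()
        sha.Write([]byte("some file contents")) // hypothetical input

        raw := string(sha.Sum(nil))               // 32 raw bytes coerced into a string, mostly unprintable
        hexStr := fmt.Sprintf("%x", sha.Sum(nil)) // 64 hex characters, easy to read and compare in logs
        fmt.Printf("raw: %q\nhex: %s\n", raw, hexStr)
    }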