Refactor the build loop. (#385)

This change refactors the build loop to make future cache optimizations easier. Notable changes:

The special casing around volume snapshots is removed: every volume is now appended to the list of files to snapshot for any command that takes a snapshot anyway.
Snapshot saving was extracted into a sub-function.
The decision on whether or not to snapshot was extracted into its own function.
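At a glance, the new loop body reads execute, decide, snapshot, save. Here is a condensed sketch of that flow, assembled from the build() diff below; cache lookups, error wrapping, and variable setup are elided, so this is not the verbatim committed code:

```go
for index, command := range cmds {
	if command == nil {
		continue
	}
	if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
		return err
	}
	files := command.FilesToSnapshot()
	// The whole snapshot decision now lives in one extracted helper.
	if !s.shouldTakeSnapshot(index, files) {
		continue
	}
	var contents []byte
	var err error
	if files == nil || s.opts.SingleSnapshot {
		contents, err = s.snapshotter.TakeSnapshotFS()
	} else {
		// Declared volumes ride along with the changed files.
		for v := range s.cf.Config.Volumes {
			files = append(files, v)
		}
		contents, err = s.snapshotter.TakeSnapshot(files)
	}
	if err != nil {
		return err
	}
	// Layer construction and cache pushing now live in saveSnapshot;
	// ck is the command's composite cache key (computation elided here).
	if err := s.saveSnapshot(command, ck, contents); err != nil {
		return err
	}
}
```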
dlorenc 2018-10-09 12:15:17 -05:00 committed by GitHub
parent 0a13e042c2
commit 9a0e29c441
4 changed files with 88 additions and 79 deletions

View File

@@ -30,8 +30,7 @@ import (
type VolumeCommand struct {
BaseCommand
cmd *instructions.VolumeCommand
snapshotFiles []string
cmd *instructions.VolumeCommand
}
func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
@@ -57,7 +56,6 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
// Only create and snapshot the dir if it didn't exist already
if _, err := os.Stat(volume); os.IsNotExist(err) {
logrus.Infof("Creating directory %s", volume)
v.snapshotFiles = append(v.snapshotFiles, volume)
if err := os.MkdirAll(volume, 0755); err != nil {
return fmt.Errorf("Could not create directory for volume %s: %s", volume, err)
}
@@ -69,7 +67,7 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
}
func (v *VolumeCommand) FilesToSnapshot() []string {
return v.snapshotFiles
return []string{}
}
func (v *VolumeCommand) String() string {

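With snapshotFiles gone, FilesToSnapshot returns an empty list, and the executor instead folds every volume declared in the image config into whichever snapshot comes next. A tiny self-contained illustration of that fold, where the volumes map and files list are stand-ins for s.cf.Config.Volumes and a command's FilesToSnapshot result:

```go
package main

import "fmt"

func main() {
	// Stand-ins for s.cf.Config.Volumes and the files a command changed.
	volumes := map[string]struct{}{"/data": {}, "/cache": {}}
	files := []string{"/app/bin"}

	// Same fold as in the executor diff below: declared volumes are
	// appended so they land in the next snapshot that gets taken.
	for v := range volumes {
		files = append(files, v)
	}
	fmt.Println(files) // e.g. [/app/bin /data /cache] (map order varies)
}
```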
View File

@@ -43,7 +43,6 @@ func TestUpdateVolume(t *testing.T) {
cmd: &instructions.VolumeCommand{
Volumes: volumes,
},
snapshotFiles: []string{},
}
expectedVolumes := map[string]struct{}{

View File

@@ -62,9 +62,6 @@ const (
// Docker command names
Cmd = "cmd"
Entrypoint = "entrypoint"
// VolumeCmdName is the name of the volume command
VolumeCmdName = "volume"
)
// KanikoBuildFiles is the list of files required to build kaniko

View File

@@ -49,6 +49,7 @@ type stageBuilder struct {
cf *v1.ConfigFile
snapshotter *snapshot.Snapshotter
baseImageDigest string
opts *config.KanikoOptions
}
// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
@@ -81,6 +82,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
cf: imageConfig,
snapshotter: snapshotter,
baseImageDigest: digest.String(),
opts: opts,
}, nil
}
@@ -111,7 +113,7 @@ func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) erro
return err
}
func (s *stageBuilder) build(opts *config.KanikoOptions) error {
func (s *stageBuilder) build() error {
// Unpack file system to root
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
@@ -120,23 +122,26 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
if err := s.snapshotter.Init(); err != nil {
return err
}
var volumes []string
// Set the initial cache key to be the base image digest, the build args and the SrcContext.
compositeKey := NewCompositeCache(s.baseImageDigest)
contextHash, err := HashDir(opts.SrcContext)
contextHash, err := HashDir(s.opts.SrcContext)
if err != nil {
return err
}
compositeKey.AddKey(opts.BuildArgs...)
compositeKey.AddKey(s.opts.BuildArgs...)
args := dockerfile.NewBuildArgs(opts.BuildArgs)
for index, cmd := range s.stage.Commands {
finalCmd := index == len(s.stage.Commands)-1
command, err := commands.GetCommand(cmd, opts.SrcContext)
cmds := []commands.DockerCommand{}
for _, cmd := range s.stage.Commands {
command, err := commands.GetCommand(cmd, s.opts.SrcContext)
if err != nil {
return err
}
cmds = append(cmds, command)
}
args := dockerfile.NewBuildArgs(s.opts.BuildArgs)
for index, command := range cmds {
if command == nil {
continue
}
@@ -153,8 +158,8 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
return err
}
if command.CacheCommand() && opts.Cache {
image, err := cache.RetrieveLayer(opts, ck)
if command.CacheCommand() && s.opts.Cache {
image, err := cache.RetrieveLayer(s.opts, ck)
if err == nil {
if err := s.extractCachedLayer(image, command.String()); err != nil {
return errors.Wrap(err, "extracting cached layer")
@@ -163,84 +168,94 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
}
logrus.Info("No cached layer found, executing command...")
}
if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
return err
}
files := command.FilesToSnapshot()
if cmd.Name() == constants.VolumeCmdName {
volumes = append(volumes, files...)
continue
}
var contents []byte
// If this is an intermediate stage, we only snapshot for the last command and we
// want to snapshot the entire filesystem since we aren't tracking what was changed
// by previous commands.
if !s.stage.Final {
if finalCmd {
contents, err = s.snapshotter.TakeSnapshotFS()
}
} else {
// If we are in single snapshot mode, we only take a snapshot once, after all
// commands have completed.
if opts.SingleSnapshot {
if finalCmd {
contents, err = s.snapshotter.TakeSnapshotFS()
}
} else {
// Otherwise, in the final stage we take a snapshot at each command. If we know
// the files that were changed, we'll snapshot those explicitly, otherwise we'll
// check if anything in the filesystem changed.
if files != nil {
if len(files) > 0 {
files = append(files, volumes...)
volumes = []string{}
}
contents, err = s.snapshotter.TakeSnapshot(files)
} else {
contents, err = s.snapshotter.TakeSnapshotFS()
volumes = []string{}
}
}
}
if err != nil {
return fmt.Errorf("Error taking snapshot of files for command %s: %s", command, err)
}
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
if !s.shouldTakeSnapshot(index, files) {
continue
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
if files == nil || s.opts.SingleSnapshot {
contents, err = s.snapshotter.TakeSnapshotFS()
} else {
// Volumes are very weird. They get created in their command, but snapshotted in the next one.
// Add them to the list of files to snapshot.
for v := range s.cf.Config.Volumes {
files = append(files, v)
}
contents, err = s.snapshotter.TakeSnapshot(files)
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return err
}
// Push layer to cache now along with new config file
if command.CacheCommand() && opts.Cache {
if err := pushLayerToCache(opts, ck, layer, command.String()); err != nil {
return err
}
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: command.String(),
},
},
)
if err != nil {
if err := s.saveSnapshot(command, ck, contents); err != nil {
return err
}
}
return nil
}
func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
isLastCommand := index == len(s.stage.Commands)-1
// We only snapshot the very end of intermediate stages.
if !s.stage.Final {
return isLastCommand
}
// We only snapshot the very end with single snapshot mode on.
if s.opts.SingleSnapshot {
return isLastCommand
}
// nil means snapshot everything.
if files == nil {
return true
}
// Don't snapshot an empty list.
if len(files) == 0 {
return false
}
return true
}
func (s *stageBuilder) saveSnapshot(command commands.DockerCommand, ck string, contents []byte) error {
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
return nil
}
// Append the layer to the image
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return err
}
// Push layer to cache now along with new config file
if command.CacheCommand() && s.opts.Cache {
if err := pushLayerToCache(s.opts, ck, layer, command.String()); err != nil {
return err
}
}
s.image, err = mutate.Append(s.image,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: command.String(),
},
},
)
return err
}
// DoBuild executes building the Dockerfile
func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
// Parse dockerfile and unpack base image to root
@@ -253,7 +268,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
}
if err := sb.build(opts); err != nil {
if err := sb.build(); err != nil {
return nil, errors.Wrap(err, "error building stage")
}
reviewConfig(stage, &sb.cf.Config)
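Two of the extracted helpers are worth a closer look. The snapshot decision can be exercised in isolation; here is a standalone mirror of shouldTakeSnapshot as a free function. This is an illustrative sketch, not the committed method, which reads s.stage.Final and s.opts.SingleSnapshot from the builder:

```go
package main

import "fmt"

// shouldTakeSnapshot mirrors the extracted method as a free function so the
// decision table is easy to see (an illustrative sketch, not the real code).
func shouldTakeSnapshot(finalStage, singleSnapshot, lastCommand bool, files []string) bool {
	// Intermediate stages and single-snapshot mode only snapshot at the end.
	if !finalStage || singleSnapshot {
		return lastCommand
	}
	// nil means "we don't know what changed", so snapshot everything;
	// an explicit empty list means there is nothing to snapshot.
	if files == nil {
		return true
	}
	return len(files) > 0
}

func main() {
	fmt.Println(shouldTakeSnapshot(true, false, false, nil))        // true
	fmt.Println(shouldTakeSnapshot(true, false, false, []string{})) // false
	fmt.Println(shouldTakeSnapshot(false, false, true, []string{})) // true: last command of an intermediate stage
}
```

And saveSnapshot follows a standard go-containerregistry pattern: wrap the snapshot bytes in an opener, build a tarball layer, and append it to the image with history metadata. A self-contained sketch of the same pattern, where the empty base image and the one-file tar payload are stand-ins rather than part of this commit:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	// Build a one-file tar archive standing in for snapshot contents
	// (error handling for the stand-in payload is elided).
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: 5})
	tw.Write([]byte("world"))
	tw.Close()
	contents := buf.Bytes()

	// Same pattern as saveSnapshot: opener -> tarball layer -> mutate.Append.
	opener := func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(contents)), nil
	}
	layer, err := tarball.LayerFromOpener(opener)
	if err != nil {
		panic(err)
	}
	img, err := mutate.Append(empty.Image, mutate.Addendum{
		Layer:   layer,
		History: v1.History{CreatedBy: "demo"},
	})
	if err != nil {
		panic(err)
	}
	digest, _ := img.Digest()
	fmt.Println(digest)
}
```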