Refactor the build loop. (#385)
This change refactors the build loop a bit to make cache optimization easier in the future. Notable changes:

- The special casing around volume snapshots is removed. Every volume is added to the list of files to snapshot for any command that snapshots anyway.
- Snapshot saving was extracted to a sub-function.
- The decision on whether or not to snapshot was extracted to a sub-function.
parent 0a13e042c2
commit 9a0e29c441
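For orientation before the diff: below is a condensed, self-contained sketch of the shape the loop takes after this change. The types here (command, stage) are invented for illustration and are not kaniko's real ones; the actual code follows in the diff.

package main

import "fmt"

// Toy stand-ins for the real kaniko types. The names echo the diff below,
// but everything here is simplified for illustration only.
type command struct {
    name  string
    files []string // nil = unknown changes, empty = nothing changed
}

type stage struct {
    final          bool
    singleSnapshot bool
    commands       []command
}

// shouldTakeSnapshot mirrors the decision this commit extracts out of the loop.
func (s *stage) shouldTakeSnapshot(index int, files []string) bool {
    isLastCommand := index == len(s.commands)-1
    if !s.final {
        return isLastCommand // intermediate stages only snapshot at the very end
    }
    if s.singleSnapshot {
        return isLastCommand // single-snapshot mode: likewise
    }
    if files == nil {
        return true // unknown changes: snapshot everything
    }
    return len(files) > 0 // skip empty lists
}

// saveSnapshot stands in for layer creation, the cache push and appending to the image.
func saveSnapshot(c command, contents []byte) {
    if contents == nil {
        fmt.Printf("%-20s no changes, no layer added\n", c.name)
        return
    }
    fmt.Printf("%-20s appended layer (%d bytes)\n", c.name, len(contents))
}

func main() {
    s := &stage{final: true, commands: []command{
        {name: "RUN make build", files: nil},
        {name: "VOLUME /data", files: []string{}},
        {name: "COPY app /app", files: []string{"/app"}},
    }}
    for i, c := range s.commands {
        if !s.shouldTakeSnapshot(i, c.files) {
            continue
        }
        contents := []byte("tar contents for " + c.name) // pretend snapshot
        saveSnapshot(c, contents)
    }
}

With the three commands above, the VOLUME step is skipped entirely (it now reports an empty file list), while RUN and COPY each produce a layer.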
Volume command:

@@ -30,8 +30,7 @@ import (
 type VolumeCommand struct {
     BaseCommand
     cmd *instructions.VolumeCommand
-    snapshotFiles []string
 }
 
 func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
@@ -57,7 +56,6 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
     // Only create and snapshot the dir if it didn't exist already
     if _, err := os.Stat(volume); os.IsNotExist(err) {
         logrus.Infof("Creating directory %s", volume)
-        v.snapshotFiles = append(v.snapshotFiles, volume)
         if err := os.MkdirAll(volume, 0755); err != nil {
             return fmt.Errorf("Could not create directory for volume %s: %s", volume, err)
         }
@@ -69,7 +67,7 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
 }
 
 func (v *VolumeCommand) FilesToSnapshot() []string {
-    return v.snapshotFiles
+    return []string{}
 }
 
 func (v *VolumeCommand) String() string {
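With the snapshotFiles field gone, VolumeCommand now reports nothing to snapshot itself; the directories it creates are folded into a later snapshot via the image config (see the executor changes further down). The nil-versus-empty distinction on FilesToSnapshot is what the new decision logic keys off, so here is a small illustrative program (not kaniko code) spelling out that convention:

package main

import "fmt"

// describe spells out the convention the executor relies on when a command
// reports its FilesToSnapshot; this helper exists only for illustration.
func describe(files []string) string {
    switch {
    case files == nil:
        return "unknown changes: snapshot the whole filesystem"
    case len(files) == 0:
        return "nothing changed: skip the snapshot"
    default:
        return fmt.Sprintf("snapshot just these paths: %v", files)
    }
}

func main() {
    fmt.Println("nil slice:   ", describe(nil))
    fmt.Println("empty slice: ", describe([]string{}))      // VOLUME after this change
    fmt.Println("named files: ", describe([]string{"/app"})) // e.g. a COPY command
}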
Volume command test:

@@ -43,7 +43,6 @@ func TestUpdateVolume(t *testing.T) {
         cmd: &instructions.VolumeCommand{
             Volumes: volumes,
         },
-        snapshotFiles: []string{},
     }
 
     expectedVolumes := map[string]struct{}{
Command-name constants:

@@ -62,9 +62,6 @@ const (
     // Docker command names
     Cmd = "cmd"
     Entrypoint = "entrypoint"
-
-    // VolumeCmdName is the name of the volume command
-    VolumeCmdName = "volume"
 )
 
 // KanikoBuildFiles is the list of files required to build kaniko
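The VolumeCmdName constant can go because the executor no longer switches on the command name at snapshot time; VOLUME paths are read back out of the image config instead. A minimal sketch of that lookup, assuming the go-containerregistry v1.Config type the code already imports:

package main

import (
    "fmt"

    v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
    // VOLUME instructions end up in the image config as a set of paths.
    cfg := v1.Config{
        Volumes: map[string]struct{}{
            "/data": {},
            "/logs": {},
        },
    }

    // Mirrors the new build loop: fold the volumes into the files to snapshot
    // instead of special-casing the volume command by name.
    files := []string{"/app"}
    for v := range cfg.Volumes {
        files = append(files, v)
    }
    fmt.Println(files)
}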
Executor stage builder and build loop:

@@ -49,6 +49,7 @@ type stageBuilder struct {
     cf *v1.ConfigFile
     snapshotter *snapshot.Snapshotter
     baseImageDigest string
+    opts *config.KanikoOptions
 }
 
 // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
@@ -81,6 +82,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
         cf: imageConfig,
         snapshotter: snapshotter,
         baseImageDigest: digest.String(),
+        opts: opts,
     }, nil
 }
 
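Storing opts on the stage builder is what lets build() drop its parameter below and lets the new helpers reach s.opts directly. The pattern in miniature, with made-up types:

package main

import "fmt"

// options and builder are invented for this sketch; the point is carrying
// options on the struct instead of threading them through every call.
type options struct {
    cache          bool
    singleSnapshot bool
}

type builder struct {
    opts *options
}

func newBuilder(opts *options) *builder {
    return &builder{opts: opts}
}

// build no longer needs opts passed in; helpers can reach b.opts directly,
// just as shouldTakeSnapshot and saveSnapshot use s.opts in the diff.
func (b *builder) build() error {
    fmt.Println("cache enabled:", b.opts.cache)
    fmt.Println("single snapshot:", b.opts.singleSnapshot)
    return nil
}

func main() {
    b := newBuilder(&options{cache: true})
    if err := b.build(); err != nil {
        fmt.Println("error:", err)
    }
}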
@@ -111,7 +113,7 @@ func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) erro
     return err
 }
 
-func (s *stageBuilder) build(opts *config.KanikoOptions) error {
+func (s *stageBuilder) build() error {
     // Unpack file system to root
     if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
         return err
@@ -120,23 +122,26 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
     if err := s.snapshotter.Init(); err != nil {
         return err
     }
-    var volumes []string
 
     // Set the initial cache key to be the base image digest, the build args and the SrcContext.
     compositeKey := NewCompositeCache(s.baseImageDigest)
-    contextHash, err := HashDir(opts.SrcContext)
+    contextHash, err := HashDir(s.opts.SrcContext)
     if err != nil {
         return err
     }
-    compositeKey.AddKey(opts.BuildArgs...)
+    compositeKey.AddKey(s.opts.BuildArgs...)
 
-    args := dockerfile.NewBuildArgs(opts.BuildArgs)
-    for index, cmd := range s.stage.Commands {
-        finalCmd := index == len(s.stage.Commands)-1
-        command, err := commands.GetCommand(cmd, opts.SrcContext)
+    cmds := []commands.DockerCommand{}
+    for _, cmd := range s.stage.Commands {
+        command, err := commands.GetCommand(cmd, s.opts.SrcContext)
         if err != nil {
             return err
         }
+        cmds = append(cmds, command)
+    }
+
+    args := dockerfile.NewBuildArgs(s.opts.BuildArgs)
+    for index, command := range cmds {
         if command == nil {
             continue
         }
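The loop is now split into two passes: first every instruction is resolved into a command object, then the commands are executed. A toy sketch of that two-pass structure (illustrative types, not kaniko's):

package main

import "fmt"

// dockerCommand and runCommand are invented for this sketch.
type dockerCommand interface {
    String() string
}

type runCommand struct{ args string }

func (r runCommand) String() string { return "RUN " + r.args }

func main() {
    instructions := []string{"apt-get update", "make build"}

    // Pass 1: resolve every instruction into a command object up front,
    // mirroring the new cmds slice in the diff. Failures surface here,
    // before anything executes.
    cmds := make([]dockerCommand, 0, len(instructions))
    for _, in := range instructions {
        cmds = append(cmds, runCommand{args: in})
    }

    // Pass 2: execute. Having the complete list in hand before running is
    // what makes later whole-stage cache optimizations straightforward.
    for i, c := range cmds {
        fmt.Printf("step %d/%d: %s\n", i+1, len(cmds), c)
    }
}

Building the full command list up front is the part the commit message points at when it says this should make cache optimization easier later.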
@@ -153,8 +158,8 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
             return err
         }
 
-        if command.CacheCommand() && opts.Cache {
-            image, err := cache.RetrieveLayer(opts, ck)
+        if command.CacheCommand() && s.opts.Cache {
+            image, err := cache.RetrieveLayer(s.opts, ck)
             if err == nil {
                 if err := s.extractCachedLayer(image, command.String()); err != nil {
                     return errors.Wrap(err, "extracting cached layer")
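Each command is looked up in the layer cache under a composite key seeded from the base image digest, the build context hash and the build args. A hedged sketch of how such a key can be assembled — an illustration of the idea only, not kaniko's CompositeCache implementation:

package main

import (
    "crypto/sha256"
    "fmt"
    "strings"
)

// compositeKey is an illustrative stand-in for a composite cache key:
// it accumulates strings and hashes them into a single per-command key.
type compositeKey struct {
    keys []string
}

func (c *compositeKey) AddKey(k ...string) {
    c.keys = append(c.keys, k...)
}

func (c *compositeKey) Hash() string {
    sum := sha256.Sum256([]byte(strings.Join(c.keys, "\n")))
    return fmt.Sprintf("%x", sum)
}

func main() {
    ck := &compositeKey{}
    ck.AddKey("sha256:<base-image-digest>") // placeholder base image digest
    ck.AddKey("<context-hash>")             // hash of the build context
    ck.AddKey("ARG_FOO=bar")                // build args
    ck.AddKey("RUN apt-get update")         // the command being looked up
    fmt.Println("cache key:", ck.Hash())
}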
@@ -163,84 +168,94 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
             }
             logrus.Info("No cached layer found, executing command...")
         }
 
         if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
             return err
         }
         files := command.FilesToSnapshot()
-        if cmd.Name() == constants.VolumeCmdName {
-            volumes = append(volumes, files...)
-            continue
-        }
         var contents []byte
 
-        // If this is an intermediate stage, we only snapshot for the last command and we
-        // want to snapshot the entire filesystem since we aren't tracking what was changed
-        // by previous commands.
-        if !s.stage.Final {
-            if finalCmd {
-                contents, err = s.snapshotter.TakeSnapshotFS()
-            }
-        } else {
-            // If we are in single snapshot mode, we only take a snapshot once, after all
-            // commands have completed.
-            if opts.SingleSnapshot {
-                if finalCmd {
-                    contents, err = s.snapshotter.TakeSnapshotFS()
-                }
-            } else {
-                // Otherwise, in the final stage we take a snapshot at each command. If we know
-                // the files that were changed, we'll snapshot those explicitly, otherwise we'll
-                // check if anything in the filesystem changed.
-                if files != nil {
-                    if len(files) > 0 {
-                        files = append(files, volumes...)
-                        volumes = []string{}
-                    }
-                    contents, err = s.snapshotter.TakeSnapshot(files)
-                } else {
-                    contents, err = s.snapshotter.TakeSnapshotFS()
-                    volumes = []string{}
-                }
-            }
-        }
-        if err != nil {
-            return fmt.Errorf("Error taking snapshot of files for command %s: %s", command, err)
-        }
-
-        if contents == nil {
+        if !s.shouldTakeSnapshot(index, files) {
             logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
             continue
         }
-        // Append the layer to the image
-        opener := func() (io.ReadCloser, error) {
-            return ioutil.NopCloser(bytes.NewReader(contents)), nil
+        if files == nil || s.opts.SingleSnapshot {
+            contents, err = s.snapshotter.TakeSnapshotFS()
+        } else {
+            // Volumes are very weird. They get created in their command, but snapshotted in the next one.
+            // Add them to the list of files to snapshot.
+            for v := range s.cf.Config.Volumes {
+                files = append(files, v)
+            }
+            contents, err = s.snapshotter.TakeSnapshot(files)
         }
-        layer, err := tarball.LayerFromOpener(opener)
         if err != nil {
             return err
         }
-        // Push layer to cache now along with new config file
-        if command.CacheCommand() && opts.Cache {
-            if err := pushLayerToCache(opts, ck, layer, command.String()); err != nil {
-                return err
-            }
-        }
-        s.image, err = mutate.Append(s.image,
-            mutate.Addendum{
-                Layer: layer,
-                History: v1.History{
-                    Author: constants.Author,
-                    CreatedBy: command.String(),
-                },
-            },
-        )
-        if err != nil {
+        if err := s.saveSnapshot(command, ck, contents); err != nil {
             return err
         }
     }
     return nil
 }
+
+func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
+    isLastCommand := index == len(s.stage.Commands)-1
+
+    // We only snapshot the very end of intermediate stages.
+    if !s.stage.Final {
+        return isLastCommand
+    }
+
+    // We only snapshot the very end with single snapshot mode on.
+    if s.opts.SingleSnapshot {
+        return isLastCommand
+    }
+
+    // nil means snapshot everything.
+    if files == nil {
+        return true
+    }
+
+    // Don't snapshot an empty list.
+    if len(files) == 0 {
+        return false
+    }
+    return true
+}
+
+func (s *stageBuilder) saveSnapshot(command commands.DockerCommand, ck string, contents []byte) error {
+    if contents == nil {
+        logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
+        return nil
+    }
+    // Append the layer to the image
+    opener := func() (io.ReadCloser, error) {
+        return ioutil.NopCloser(bytes.NewReader(contents)), nil
+    }
+    layer, err := tarball.LayerFromOpener(opener)
+    if err != nil {
+        return err
+    }
+    // Push layer to cache now along with new config file
+    if command.CacheCommand() && s.opts.Cache {
+        if err := pushLayerToCache(s.opts, ck, layer, command.String()); err != nil {
+            return err
+        }
+    }
+    s.image, err = mutate.Append(s.image,
+        mutate.Addendum{
+            Layer: layer,
+            History: v1.History{
+                Author: constants.Author,
+                CreatedBy: command.String(),
+            },
+        },
+    )
+    return err
+}
+
 // DoBuild executes building the Dockerfile
 func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
     // Parse dockerfile and unpack base image to root
@@ -253,7 +268,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
         if err != nil {
             return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
         }
-        if err := sb.build(opts); err != nil {
+        if err := sb.build(); err != nil {
             return nil, errors.Wrap(err, "error building stage")
         }
         reviewConfig(stage, &sb.cf.Config)