Refactor build into stageBuilder type

Refactoring the build by stage will make it easier to generate cache keys
for layers, since the stageBuilder type will contain everything required
to generate the key (a sketch of combining these inputs follows the list):

1. Base image with digest
2. Config file
3. Snapshotter (which will provide a key for the filesystem)
4. The current command (which will be passed in)
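
As an aside: key() is left stubbed in this commit (see the TODO in the diff
below), so the following is only a hedged sketch of how those four inputs
might be combined. The compositeKey type, its field names, and the idea of
hashing a JSON serialization are assumptions made for illustration, not
kaniko's actual implementation.

    package main

    import (
        "crypto/sha256"
        "encoding/json"
        "fmt"
    )

    // compositeKey gathers the four inputs listed above (illustrative only).
    type compositeKey struct {
        BaseImageDigest string // 1. base image with digest
        ConfigFile      string // 2. serialized config file
        FilesystemKey   string // 3. key provided by the snapshotter
        Command         string // 4. the current command
    }

    // hash returns a hex SHA-256 over the serialized key, so a change to any
    // one input yields a different cache key.
    func (c compositeKey) hash() (string, error) {
        b, err := json.Marshal(c)
        if err != nil {
            return "", err
        }
        return fmt.Sprintf("%x", sha256.Sum256(b)), nil
    }

    func main() {
        k := compositeKey{
            BaseImageDigest: "sha256:b5b4d0...",
            ConfigFile:      `{"Env":["FOO=bar"]}`,
            FilesystemKey:   "fs-key-from-snapshotter",
            Command:         "RUN make",
        }
        key, err := k.hash()
        if err != nil {
            panic(err)
        }
        fmt.Println(key)
    }

Any stable serialization would work here; JSON is used only to keep the
sketch short.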
Priya Wadhwa 2018-09-07 17:04:04 -07:00
parent 4dc34343b6
commit d9022dd7de
1 changed file with 145 additions and 96 deletions

@@ -30,6 +30,7 @@ import (
     "github.com/google/go-containerregistry/pkg/v1"
     "github.com/google/go-containerregistry/pkg/v1/mutate"
     "github.com/google/go-containerregistry/pkg/v1/tarball"
+    "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "github.com/GoogleContainerTools/kaniko/pkg/commands"
@@ -40,112 +41,160 @@ import (
     "github.com/GoogleContainerTools/kaniko/pkg/util"
 )
+// stageBuilder contains all fields necessary to build one stage of a Dockerfile
+type stageBuilder struct {
+    stage config.KanikoStage
+    v1.Image
+    *v1.ConfigFile
+    *snapshot.Snapshotter
+    baseImageDigest string
+}
+
+// newStageBuilder returns a new stageBuilder which contains all the information required to build the stage
+func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
+    sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
+    if err != nil {
+        return nil, err
+    }
+    imageConfig, err := util.RetrieveConfigFile(sourceImage)
+    if err != nil {
+        return nil, err
+    }
+    if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
+        return nil, err
+    }
+    hasher, err := getHasher(opts.SnapshotMode)
+    if err != nil {
+        return nil, err
+    }
+    l := snapshot.NewLayeredMap(hasher)
+    snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
+    digest, err := sourceImage.Digest()
+    if err != nil {
+        return nil, err
+    }
+    return &stageBuilder{
+        stage:           stage,
+        Image:           sourceImage,
+        ConfigFile:      imageConfig,
+        Snapshotter:     snapshotter,
+        baseImageDigest: digest.String(),
+    }, nil
+}
+
+// key will return a string representation of the build at the cmd
+// TODO: priyawadhwa@ to fill this out when implementing caching
+func (s *stageBuilder) key(cmd string) (string, error) {
+    return "", nil
+}
+
+// extractCachedLayer will extract the cached layer and append it to the config file
+// TODO: priyawadhwa@ to fill this out when implementing caching
+func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) error {
+    return nil
+}
+
+func (s *stageBuilder) buildStage(opts *config.KanikoOptions) error {
+    // Unpack file system to root
+    if err := util.GetFSFromImage(constants.RootDir, s.Image); err != nil {
+        return err
+    }
+    // Take initial snapshot
+    if err := s.Snapshotter.Init(); err != nil {
+        return err
+    }
+    buildArgs := dockerfile.NewBuildArgs(opts.BuildArgs)
+    for index, cmd := range s.stage.Commands {
+        finalCmd := index == len(s.stage.Commands)-1
+        dockerCommand, err := commands.GetCommand(cmd, opts.SrcContext)
+        if err != nil {
+            return err
+        }
+        if dockerCommand == nil {
+            continue
+        }
+        logrus.Info(dockerCommand.String())
+        if err := dockerCommand.ExecuteCommand(&s.ConfigFile.Config, buildArgs); err != nil {
+            return err
+        }
+        snapshotFiles := dockerCommand.FilesToSnapshot()
+        var contents []byte
+        // If this is an intermediate stage, we only snapshot for the last command and we
+        // want to snapshot the entire filesystem since we aren't tracking what was changed
+        // by previous commands.
+        if !s.stage.FinalStage {
+            if finalCmd {
+                contents, err = s.Snapshotter.TakeSnapshotFS()
+            }
+        } else {
+            // If we are in single snapshot mode, we only take a snapshot once, after all
+            // commands have completed.
+            if opts.SingleSnapshot {
+                if finalCmd {
+                    contents, err = s.Snapshotter.TakeSnapshotFS()
+                }
+            } else {
+                // Otherwise, in the final stage we take a snapshot at each command. If we know
+                // the files that were changed, we'll snapshot those explicitly, otherwise we'll
+                // check if anything in the filesystem changed.
+                if snapshotFiles != nil {
+                    contents, err = s.Snapshotter.TakeSnapshot(snapshotFiles)
+                } else {
+                    contents, err = s.Snapshotter.TakeSnapshotFS()
+                }
+            }
+        }
+        if err != nil {
+            return fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
+        }
+        util.MoveVolumeWhitelistToWhitelist()
+        if contents == nil {
+            logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
+            continue
+        }
+        // Append the layer to the image
+        opener := func() (io.ReadCloser, error) {
+            return ioutil.NopCloser(bytes.NewReader(contents)), nil
+        }
+        layer, err := tarball.LayerFromOpener(opener)
+        if err != nil {
+            return err
+        }
+        s.Image, err = mutate.Append(s.Image,
+            mutate.Addendum{
+                Layer: layer,
+                History: v1.History{
+                    Author:    constants.Author,
+                    CreatedBy: dockerCommand.String(),
+                },
+            },
+        )
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
 // DoBuild executes building the Dockerfile
 func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
     // Parse dockerfile and unpack base image to root
     stages, err := dockerfile.Stages(opts)
     if err != nil {
         return nil, err
     }
-    hasher, err := getHasher(opts.SnapshotMode)
-    if err != nil {
-        return nil, err
-    }
     for index, stage := range stages {
-        // Unpack file system to root
-        sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
+        stageBuilder, err := newStageBuilder(opts, stage)
         if err != nil {
-            return nil, err
+            return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
         }
-        if err := util.GetFSFromImage(constants.RootDir, sourceImage); err != nil {
-            return nil, err
+        if err := stageBuilder.buildStage(opts); err != nil {
+            return nil, errors.Wrap(err, "error building stage")
         }
-        l := snapshot.NewLayeredMap(hasher)
-        snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
-        // Take initial snapshot
-        if err := snapshotter.Init(); err != nil {
-            return nil, err
-        }
-        imageConfig, err := util.RetrieveConfigFile(sourceImage)
-        if err != nil {
-            return nil, err
-        }
-        if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
-            return nil, err
-        }
-        buildArgs := dockerfile.NewBuildArgs(opts.BuildArgs)
-        for index, cmd := range stage.Commands {
-            finalCmd := index == len(stage.Commands)-1
-            dockerCommand, err := commands.GetCommand(cmd, opts.SrcContext)
-            if err != nil {
-                return nil, err
-            }
-            if dockerCommand == nil {
-                continue
-            }
-            logrus.Info(dockerCommand.String())
-            if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil {
-                return nil, err
-            }
-            snapshotFiles := dockerCommand.FilesToSnapshot()
-            var contents []byte
-            // If this is an intermediate stage, we only snapshot for the last command and we
-            // want to snapshot the entire filesystem since we aren't tracking what was changed
-            // by previous commands.
-            if !stage.FinalStage {
-                if finalCmd {
-                    contents, err = snapshotter.TakeSnapshotFS()
-                }
-            } else {
-                // If we are in single snapshot mode, we only take a snapshot once, after all
-                // commands have completed.
-                if opts.SingleSnapshot {
-                    if finalCmd {
-                        contents, err = snapshotter.TakeSnapshotFS()
-                    }
-                } else {
-                    // Otherwise, in the final stage we take a snapshot at each command. If we know
-                    // the files that were changed, we'll snapshot those explicitly, otherwise we'll
-                    // check if anything in the filesystem changed.
-                    if snapshotFiles != nil {
-                        contents, err = snapshotter.TakeSnapshot(snapshotFiles)
-                    } else {
-                        contents, err = snapshotter.TakeSnapshotFS()
-                    }
-                }
-            }
-            if err != nil {
-                return nil, fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
-            }
-            util.MoveVolumeWhitelistToWhitelist()
-            if contents == nil {
-                logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
-                continue
-            }
-            // Append the layer to the image
-            opener := func() (io.ReadCloser, error) {
-                return ioutil.NopCloser(bytes.NewReader(contents)), nil
-            }
-            layer, err := tarball.LayerFromOpener(opener)
-            if err != nil {
-                return nil, err
-            }
-            sourceImage, err = mutate.Append(sourceImage,
-                mutate.Addendum{
-                    Layer: layer,
-                    History: v1.History{
-                        Author:    constants.Author,
-                        CreatedBy: dockerCommand.String(),
-                    },
-                },
-            )
-            if err != nil {
-                return nil, err
-            }
-        }
-        sourceImage, err = mutate.Config(sourceImage, imageConfig.Config)
+        sourceImage, err := mutate.Config(stageBuilder.Image, stageBuilder.ConfigFile.Config)
         if err != nil {
             return nil, err
         }
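
The snapshot branching that appears both in the removed DoBuild loop and in
the new buildStage boils down to one small policy. As a reading aid, here is
that policy distilled into a standalone sketch; snapshotPolicy is a
hypothetical name for this illustration, not a kaniko function.

    package main

    import "fmt"

    // snapshotPolicy reports whether to take a snapshot after the current
    // command, and whether that snapshot must scan the whole filesystem or
    // only the files the command says it changed.
    func snapshotPolicy(finalStage, singleSnapshot, finalCmd bool, files []string) (take, wholeFS bool) {
        if !finalStage {
            // Intermediate stage: one full-filesystem snapshot after the last command.
            return finalCmd, true
        }
        if singleSnapshot {
            // Single-snapshot mode: likewise, one full snapshot at the end.
            return finalCmd, true
        }
        // Final stage, default mode: snapshot after every command, scanning
        // only the changed files when the command reports them.
        return true, files == nil
    }

    func main() {
        take, wholeFS := snapshotPolicy(true, false, false, []string{"/app/bin"})
        fmt.Println(take, wholeFS) // true false: snapshot only the listed files
    }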