Refactor build into stageBuilder type
Refactoring builds by stage will make it easier to generate cache keys for
layers, since the stageBuilder type will contain everything required to
generate the key:

1. Base image with digest
2. Config file
3. Snapshotter (which will provide a key for the filesystem)
4. The current command (which will be passed in)
parent 4dc34343b6
commit d9022dd7de
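The key() method added in this diff is still a stub, so the following is only a sketch of how the four inputs listed in the commit message could be combined into a per-command cache key. Everything in it (the cacheKeyInputs struct, its field names, and the compositeKey helper) is hypothetical and not part of this commit; it only illustrates the idea that a change to any one input should change the layer's key.

// Hypothetical sketch, not kaniko code: combine the four cache-key inputs
// named in the commit message into a single digest.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// cacheKeyInputs mirrors the data a stageBuilder is meant to hold.
type cacheKeyInputs struct {
	BaseImageDigest string // base image with digest
	ConfigJSON      string // serialized config file
	FilesystemKey   string // key for the filesystem, provided by the snapshotter
	Command         string // the current command being executed
}

// compositeKey hashes all inputs together, so changing any one of them
// (base image, config, filesystem state, or command) yields a new key.
func compositeKey(in cacheKeyInputs) string {
	h := sha256.New()
	fmt.Fprintf(h, "%s\n%s\n%s\n%s", in.BaseImageDigest, in.ConfigJSON, in.FilesystemKey, in.Command)
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(compositeKey(cacheKeyInputs{
		BaseImageDigest: "sha256:abc123",
		ConfigJSON:      `{"Env":["PATH=/usr/local/bin"]}`,
		FilesystemKey:   "fs-key-from-snapshotter",
		Command:         "RUN apt-get update",
	}))
}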
@@ -30,6 +30,7 @@ import (
 	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/mutate"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 
 	"github.com/GoogleContainerTools/kaniko/pkg/commands"
@@ -40,32 +41,21 @@ import (
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 )
 
-func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
-	// Parse dockerfile and unpack base image to root
-	stages, err := dockerfile.Stages(opts)
-	if err != nil {
-		return nil, err
-	}
-	hasher, err := getHasher(opts.SnapshotMode)
-	if err != nil {
-		return nil, err
-	}
-	for index, stage := range stages {
-		// Unpack file system to root
-		sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
-		if err != nil {
-			return nil, err
-		}
-		if err := util.GetFSFromImage(constants.RootDir, sourceImage); err != nil {
-			return nil, err
-		}
-		l := snapshot.NewLayeredMap(hasher)
-		snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
-		// Take initial snapshot
-		if err := snapshotter.Init(); err != nil {
-			return nil, err
-		}
-		imageConfig, err := util.RetrieveConfigFile(sourceImage)
-		if err != nil {
-			return nil, err
+// stageBuilder contains all fields necessary to build one stage of a Dockerfile
+type stageBuilder struct {
+	stage config.KanikoStage
+	v1.Image
+	*v1.ConfigFile
+	*snapshot.Snapshotter
+	baseImageDigest string
+}
+
+// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
+func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
+	sourceImage, err := util.RetrieveSourceImage(stage, opts.BuildArgs)
+	if err != nil {
+		return nil, err
+	}
+	imageConfig, err := util.RetrieveConfigFile(sourceImage)
+	if err != nil {
+		return nil, err
@@ -73,19 +63,60 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 	if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
 		return nil, err
 	}
-	buildArgs := dockerfile.NewBuildArgs(opts.BuildArgs)
-	for index, cmd := range stage.Commands {
-		finalCmd := index == len(stage.Commands)-1
-		dockerCommand, err := commands.GetCommand(cmd, opts.SrcContext)
+	hasher, err := getHasher(opts.SnapshotMode)
+	if err != nil {
+		return nil, err
+	}
+	l := snapshot.NewLayeredMap(hasher)
+	snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
+
+	digest, err := sourceImage.Digest()
+	if err != nil {
+		return nil, err
+	}
+	return &stageBuilder{
+		stage: stage,
+		Image: sourceImage,
+		ConfigFile: imageConfig,
+		Snapshotter: snapshotter,
+		baseImageDigest: digest.String(),
+	}, nil
+}
+
+// key will return a string representation of the build at the cmd
+// TODO: priyawadhwa@ to fill this out when implementing caching
+func (s *stageBuilder) key(cmd string) (string, error) {
+	return "", nil
+}
+
+// extractCachedLayer will extract the cached layer and append it to the config file
+// TODO: priyawadhwa@ to fill this out when implementing caching
+func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) error {
+	return nil
+}
+
+func (s *stageBuilder) buildStage(opts *config.KanikoOptions) error {
+	// Unpack file system to root
+	if err := util.GetFSFromImage(constants.RootDir, s.Image); err != nil {
+		return err
+	}
+	// Take initial snapshot
+	if err := s.Snapshotter.Init(); err != nil {
+		return err
+	}
+	buildArgs := dockerfile.NewBuildArgs(opts.BuildArgs)
+	for index, cmd := range s.stage.Commands {
+		finalCmd := index == len(s.stage.Commands)-1
+		dockerCommand, err := commands.GetCommand(cmd, opts.SrcContext)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if dockerCommand == nil {
 			continue
 		}
 		logrus.Info(dockerCommand.String())
-		if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil {
-			return nil, err
+		if err := dockerCommand.ExecuteCommand(&s.ConfigFile.Config, buildArgs); err != nil {
+			return err
 		}
 		snapshotFiles := dockerCommand.FilesToSnapshot()
 		var contents []byte
@@ -93,30 +124,30 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		// If this is an intermediate stage, we only snapshot for the last command and we
 		// want to snapshot the entire filesystem since we aren't tracking what was changed
 		// by previous commands.
-		if !stage.FinalStage {
+		if !s.stage.FinalStage {
 			if finalCmd {
-				contents, err = snapshotter.TakeSnapshotFS()
+				contents, err = s.Snapshotter.TakeSnapshotFS()
 			}
 		} else {
 			// If we are in single snapshot mode, we only take a snapshot once, after all
 			// commands have completed.
 			if opts.SingleSnapshot {
 				if finalCmd {
-					contents, err = snapshotter.TakeSnapshotFS()
+					contents, err = s.Snapshotter.TakeSnapshotFS()
 				}
 			} else {
 				// Otherwise, in the final stage we take a snapshot at each command. If we know
 				// the files that were changed, we'll snapshot those explicitly, otherwise we'll
 				// check if anything in the filesystem changed.
 				if snapshotFiles != nil {
-					contents, err = snapshotter.TakeSnapshot(snapshotFiles)
+					contents, err = s.Snapshotter.TakeSnapshot(snapshotFiles)
 				} else {
-					contents, err = snapshotter.TakeSnapshotFS()
+					contents, err = s.Snapshotter.TakeSnapshotFS()
 				}
 			}
 		}
 		if err != nil {
-			return nil, fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
+			return fmt.Errorf("Error taking snapshot of files for command %s: %s", dockerCommand, err)
 		}
 
 		util.MoveVolumeWhitelistToWhitelist()
@@ -130,9 +161,9 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		}
 		layer, err := tarball.LayerFromOpener(opener)
 		if err != nil {
-			return nil, err
+			return err
 		}
-		sourceImage, err = mutate.Append(sourceImage,
+		s.Image, err = mutate.Append(s.Image,
 			mutate.Addendum{
 				Layer: layer,
 				History: v1.History{
@@ -141,11 +172,29 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 				},
 			},
 		)
 		if err != nil {
-			return nil, err
+			return err
 		}
 	}
-	sourceImage, err = mutate.Config(sourceImage, imageConfig.Config)
+	return nil
+}
+
+// DoBuild executes building the Dockerfile
+func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
+	// Parse dockerfile and unpack base image to root
+	stages, err := dockerfile.Stages(opts)
+	if err != nil {
+		return nil, err
+	}
+	for index, stage := range stages {
+		stageBuilder, err := newStageBuilder(opts, stage)
+		if err != nil {
+			return nil, errors.Wrap(err, fmt.Sprintf("getting stage builder for stage %d", index))
+		}
+		if err := stageBuilder.buildStage(opts); err != nil {
+			return nil, errors.Wrap(err, "error building stage")
+		}
+		sourceImage, err := mutate.Config(stageBuilder.Image, stageBuilder.ConfigFile.Config)
 		if err != nil {
 			return nil, err
 		}
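extractCachedLayer is likewise left as a TODO in this commit. As a rough illustration of the direction its doc comment suggests, the sketch below appends a layer taken from a cached image using the same mutate.Append pattern buildStage already uses; the choice of the cached image's top layer and the History contents are assumptions, not behavior defined by this commit. It relies only on the go-containerregistry v1 and mutate packages already imported in the diffed file.

// Hypothetical sketch only: one possible future shape of extractCachedLayer,
// not code from this commit.
func (s *stageBuilder) extractCachedLayer(cached v1.Image, createdBy string) error {
	// Assumption: the cached layer for this command is the top layer of the cached image.
	layers, err := cached.Layers()
	if err != nil {
		return err
	}
	if len(layers) == 0 {
		return nil // nothing to append
	}
	s.Image, err = mutate.Append(s.Image,
		mutate.Addendum{
			Layer: layers[len(layers)-1],
			History: v1.History{
				CreatedBy: createdBy, // record which command produced the cached layer
			},
		},
	)
	return err
}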