Fixed merge conflict
commit a34ba5c233

CHANGELOG.md (14 changed lines)

@@ -1,3 +1,17 @@
+# v0.7.0 Release - 12/10/2018
+
+## New Features
+* Add support for COPY --from an unrelated image
+
+## Updates
+* Speed up snapshotting by using filepath.SkipDir
+* Improve layer cache upload performance
+* Skip unpacking the base image in certain cases
+
+## Bug Fixes
+* Fix bug with call loop
+* Fix caching for multi-step builds
+
 # v0.6.0 Release - 11/06/2018
 
 ## New Features

Makefile (2 changed lines)

@@ -14,7 +14,7 @@
 # Bump these on release
 VERSION_MAJOR ?= 0
-VERSION_MINOR ?= 6
+VERSION_MINOR ?= 7
 VERSION_BUILD ?= 0
 
 VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD)

@@ -15,3 +15,4 @@ FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5a
 FROM base
 ARG file
 COPY --from=second /foo ${file}
+COPY --from=gcr.io/google-appengine/debian9@sha256:00109fa40230a081f5ecffe0e814725042ff62a03e2d1eae0563f1f82eaeae9b /etc/os-release /new

@@ -124,6 +124,7 @@ func resolveStages(stages []instructions.Stage) {
 				if val, ok := nameToIndex[c.From]; ok {
 					c.From = val
 				}
 
 			}
 		}
 	}

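The nameToIndex lookup in this hunk is what keeps non-stage references intact: a --from value naming an earlier stage is rewritten to that stage's index, while anything else (for example a registry reference) passes through unchanged. A small illustrative sketch in plain Go, with made-up stage names:

package main

import "fmt"

func main() {
	// Hypothetical stage names in declaration order; resolveStages in the
	// hunk above builds a map like this from the parsed stages.
	nameToIndex := map[string]string{"base": "0", "second": "1"}

	froms := []string{"second", "gcr.io/google-appengine/debian9"}
	for i, from := range froms {
		if val, ok := nameToIndex[from]; ok {
			froms[i] = val // a named previous stage becomes its index
		}
		// anything else, e.g. a remote image reference, is left untouched
	}
	fmt.Println(froms) // [1 gcr.io/google-appengine/debian9]
}
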
@@ -23,6 +23,8 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+
 	"golang.org/x/sync/errgroup"
 
 	"github.com/google/go-containerregistry/pkg/name"

@@ -277,13 +279,18 @@ func (s *stageBuilder) takeSnapshot(files []string) (string, error) {
 func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
 	isLastCommand := index == len(s.stage.Commands)-1
 
-	// We only snapshot the very end of intermediate stages.
-	if !s.stage.Final {
+	// We only snapshot the very end with single snapshot mode on.
+	if s.opts.SingleSnapshot {
 		return isLastCommand
 	}
 
-	// We only snapshot the very end with single snapshot mode on.
-	if s.opts.SingleSnapshot {
+	// Always take snapshots if we're using the cache.
+	if s.opts.Cache {
+		return true
+	}
+
+	// We only snapshot the very end of intermediate stages.
+	if !s.stage.Final {
 		return isLastCommand
 	}
 

@@ -296,6 +303,7 @@ func (s *stageBuilder) shouldTakeSnapshot(index int, files []string) bool {
 	if len(files) == 0 {
 		return false
 	}
 
 	return true
 }
 
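Taken together, the two hunks above reorder shouldTakeSnapshot: single-snapshot mode is checked first, the cache option then forces a snapshot, and the intermediate-stage rule comes last, followed by the changed-files check. A minimal self-contained sketch of that ordering follows; the stand-in types are invented for illustration, and any lines of the real function outside the captured hunks are omitted.

package main

import "fmt"

// Stand-ins for the kaniko types referenced in the hunks; the real
// stageBuilder, KanikoStage, and KanikoOptions carry many more fields.
type options struct {
	SingleSnapshot bool
	Cache          bool
}

type stageInfo struct {
	Final       bool
	NumCommands int
}

// shouldTakeSnapshot mirrors the ordering introduced above: single-snapshot
// mode first, then the cache override, then the intermediate-stage rule,
// then the changed-files check.
func shouldTakeSnapshot(s stageInfo, o options, index int, files []string) bool {
	isLastCommand := index == s.NumCommands-1

	if o.SingleSnapshot {
		return isLastCommand
	}
	if o.Cache {
		return true
	}
	if !s.Final {
		return isLastCommand
	}
	if len(files) == 0 {
		return false
	}
	return true
}

func main() {
	intermediate := stageInfo{Final: false, NumCommands: 3}
	fmt.Println(shouldTakeSnapshot(intermediate, options{Cache: true}, 0, nil)) // true: cache forces snapshots
	fmt.Println(shouldTakeSnapshot(intermediate, options{}, 0, nil))            // false: not the last command
}
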
@@ -340,6 +348,10 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 	if err := util.GetExcludedFiles(opts.SrcContext); err != nil {
 		return nil, err
 	}
+	// Some stages may refer to other random images, not previous stages
+	if err := fetchExtraStages(stages, opts); err != nil {
+		return nil, err
+	}
 	for index, stage := range stages {
 		sb, err := newStageBuilder(opts, stage)
 		if err != nil {

@@ -383,10 +395,10 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 			return sourceImage, nil
 		}
 		if stage.SaveStage {
-			if err := saveStageAsTarball(index, sourceImage); err != nil {
+			if err := saveStageAsTarball(strconv.Itoa(index), sourceImage); err != nil {
 				return nil, err
 			}
-			if err := extractImageToDependecyDir(index, sourceImage); err != nil {
+			if err := extractImageToDependecyDir(strconv.Itoa(index), sourceImage); err != nil {
 				return nil, err
 			}
 		}

@@ -399,8 +411,38 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		return nil, err
 	}
 
-func extractImageToDependecyDir(index int, image v1.Image) error {
-	dependencyDir := filepath.Join(constants.KanikoDir, strconv.Itoa(index))
+func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error {
+	for _, s := range stages {
+		for _, cmd := range s.Commands {
+			c, ok := cmd.(*instructions.CopyCommand)
+			if !ok || c.From == "" {
+				continue
+			}
+
+			// FROMs at this point are guaranteed to be either an integer referring to a previous stage or
+			// a name of a remote image.
+			if _, err := strconv.Atoi(c.From); err == nil {
+				continue
+			}
+			// This must be an image name, fetch it.
+			logrus.Debugf("Found extra base image stage %s", c.From)
+			sourceImage, err := util.RetrieveRemoteImage(c.From, opts, false)
+			if err != nil {
+				return err
+			}
+			if err := saveStageAsTarball(c.From, sourceImage); err != nil {
+				return err
+			}
+			if err := extractImageToDependecyDir(c.From, sourceImage); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func extractImageToDependecyDir(name string, image v1.Image) error {
+	dependencyDir := filepath.Join(constants.KanikoDir, name)
 	if err := os.MkdirAll(dependencyDir, 0755); err != nil {
 		return err
 	}

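The strconv.Atoi check above is what separates stage references from image references once resolveStages has normalized them. A short hedged illustration (the helper and values are made up, not part of kaniko):

package main

import (
	"fmt"
	"strconv"
)

// isPreviousStage mimics the check in fetchExtraStages above: after
// resolveStages has run, a COPY --from value is either the integer index of
// an earlier stage or the name of a remote image.
func isPreviousStage(from string) bool {
	_, err := strconv.Atoi(from)
	return err == nil
}

func main() {
	// Hypothetical --from values, echoing the Dockerfile change earlier in
	// this commit.
	for _, from := range []string{"1", "gcr.io/google-appengine/debian9"} {
		if isPreviousStage(from) {
			fmt.Printf("%q refers to a previous stage and is already stored locally\n", from)
		} else {
			fmt.Printf("%q is an extra base image and gets fetched up front\n", from)
		}
	}
}
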
@@ -409,16 +451,16 @@ func extractImageToDependecyDir(index int, image v1.Image) error {
 		return err
 	}
 
-func saveStageAsTarball(stageIndex int, image v1.Image) error {
+func saveStageAsTarball(path string, image v1.Image) error {
 	destRef, err := name.NewTag("temp/tag", name.WeakValidation)
 	if err != nil {
 		return err
 	}
-	if err := os.MkdirAll(constants.KanikoIntermediateStagesDir, 0750); err != nil {
+	tarPath := filepath.Join(constants.KanikoIntermediateStagesDir, path)
+	logrus.Infof("Storing source image from stage %s at path %s", path, tarPath)
+	if err := os.MkdirAll(filepath.Dir(tarPath), 0750); err != nil {
 		return err
 	}
-	tarPath := filepath.Join(constants.KanikoIntermediateStagesDir, strconv.Itoa(stageIndex))
-	logrus.Infof("Storing source image from stage %d at path %s", stageIndex, tarPath)
 	return tarball.WriteToFile(tarPath, destRef, image)
 }
 

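A plausible reason the function now runs os.MkdirAll on filepath.Dir(tarPath) rather than on the top-level intermediate-stages directory: the path component can now be an image reference containing slashes, so nested directories have to exist before the tarball is written. A small sketch, with an illustrative stand-in for the directory constant:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Illustrative stand-in for constants.KanikoIntermediateStagesDir; the
	// real value is not shown in this diff.
	const intermediateStagesDir = "/kaniko/stages"

	for _, stage := range []string{"0", "gcr.io/google-appengine/debian9"} {
		tarPath := filepath.Join(intermediateStagesDir, stage)
		// For an image reference, filepath.Dir(tarPath) is a nested directory
		// that MkdirAll must create before the tarball can be written.
		fmt.Println(tarPath, "->", filepath.Dir(tarPath))
	}
}
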
@@ -19,6 +19,8 @@ package executor
 import (
 	"testing"
 
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/testutil"

@@ -74,3 +76,107 @@ func stage(t *testing.T, d string) config.KanikoStage {
 		Stage: stages[0],
 	}
 }
+
+type MockCommand struct {
+	name string
+}
+
+func (m *MockCommand) Name() string {
+	return m.name
+}
+
+func Test_stageBuilder_shouldTakeSnapshot(t *testing.T) {
+	commands := []instructions.Command{
+		&MockCommand{name: "command1"},
+		&MockCommand{name: "command2"},
+		&MockCommand{name: "command3"},
+	}
+
+	stage := instructions.Stage{
+		Commands: commands,
+	}
+
+	type fields struct {
+		stage config.KanikoStage
+		opts  *config.KanikoOptions
+	}
+	type args struct {
+		index int
+		files []string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   bool
+	}{
+		{
+			name: "final stage not last command",
+			fields: fields{
+				stage: config.KanikoStage{
+					Final: true,
+					Stage: stage,
+				},
+			},
+			args: args{
+				index: 1,
+			},
+			want: true,
+		},
+		{
+			name: "not final stage last command",
+			fields: fields{
+				stage: config.KanikoStage{
+					Final: false,
+					Stage: stage,
+				},
+			},
+			args: args{
+				index: len(commands) - 1,
+			},
+			want: true,
+		},
+		{
+			name: "not final stage not last command",
+			fields: fields{
+				stage: config.KanikoStage{
+					Final: false,
+					Stage: stage,
+				},
+			},
+			args: args{
+				index: 0,
+			},
+			want: false,
+		},
+		{
+			name: "caching enabled intermediate container",
+			fields: fields{
+				stage: config.KanikoStage{
+					Final: false,
+					Stage: stage,
+				},
+				opts: &config.KanikoOptions{Cache: true},
+			},
+			args: args{
+				index: 0,
+			},
+			want: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			if tt.fields.opts == nil {
+				tt.fields.opts = &config.KanikoOptions{}
+			}
+			s := &stageBuilder{
+				stage: tt.fields.stage,
+				opts:  tt.fields.opts,
+			}
+			if got := s.shouldTakeSnapshot(tt.args.index, tt.args.files); got != tt.want {
+				t.Errorf("stageBuilder.shouldTakeSnapshot() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}

@@ -39,8 +39,8 @@ import (
 )
 
 var (
-	// For testing
-	retrieveRemoteImage = remoteImage
+	// RetrieveRemoteImage downloads an image from a remote location
+	RetrieveRemoteImage = remoteImage
 	retrieveTarImage = tarballImage
 )
 

@@ -67,21 +67,8 @@ func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (
 		return retrieveTarImage(stage.BaseImageIndex)
 	}
 
-	// Next, check if local caching is enabled
-	// If so, look in the local cache before trying the remote registry
-	if opts.Cache && opts.CacheDir != "" {
-		cachedImage, err := cachedImage(opts, currentBaseName)
-		if cachedImage != nil {
-			return cachedImage, nil
-		}
-
-		if err != nil {
-			logrus.Warnf("Error while retrieving image from cache: %v", err)
-		}
-	}
-
 	// Otherwise, initialize image as usual
-	return retrieveRemoteImage(currentBaseName, opts)
+	return RetrieveRemoteImage(currentBaseName, opts, false)
 }
 
 // RetrieveConfigFile returns the config file for an image

@@ -102,8 +89,20 @@ func tarballImage(index int) (v1.Image, error) {
 	return tarball.ImageFromPath(tarPath, nil)
 }
 
-func remoteImage(image string, opts *config.KanikoOptions) (v1.Image, error) {
+func remoteImage(image string, opts *config.KanikoOptions, forceNoCache bool) (v1.Image, error) {
 	logrus.Infof("Downloading base image %s", image)
+	// First, check if local caching is enabled
+	// If so, look in the local cache before trying the remote registry
+	if opts.Cache && opts.CacheDir != "" && !forceNoCache {
+		cachedImage, err := cachedImage(opts, image)
+		if cachedImage != nil {
+			return cachedImage, nil
+		}
+
+		if err != nil {
+			logrus.Warnf("Error while retrieving image from cache: %v", err)
+		}
+	}
 	ref, err := name.ParseReference(image, name.WeakValidation)
 	if err != nil {
 		return nil, err

@@ -149,7 +148,7 @@ func cachedImage(opts *config.KanikoOptions, image string) (v1.Image, error) {
 	if d, ok := ref.(name.Digest); ok {
 		cacheKey = d.DigestStr()
 	} else {
-		img, err := remoteImage(image, opts)
+		img, err := remoteImage(image, opts, true)
 		if err != nil {
 			return nil, err
 		}

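The forceNoCache argument added to remoteImage matters here: cachedImage itself calls remoteImage to resolve the reference when it is not already a digest, and passing true keeps that lookup from going back through the cache path. A minimal sketch of the pattern, with placeholder types in place of the go-containerregistry ones:

package main

import "fmt"

type image string

// fetch stands in for remoteImage above: it consults a local cache unless
// forceNoCache is set, which lets the cache-key lookup reuse the same
// function without re-entering the cache path.
func fetch(ref string, cacheEnabled, forceNoCache bool) image {
	if cacheEnabled && !forceNoCache {
		if img, ok := lookupCache(ref); ok {
			return img
		}
	}
	return image("pulled from registry: " + ref)
}

// lookupCache stands in for cachedImage: computing the cache key needs the
// remote manifest, so it calls fetch with forceNoCache set to true.
func lookupCache(ref string) (image, bool) {
	_ = fetch(ref, true, true) // resolves the reference, never recurses into the cache
	return "", false           // pretend nothing is cached in this sketch
}

func main() {
	fmt.Println(fetch("gcr.io/example/app:latest", true, false))
}
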
@@ -47,14 +47,14 @@ func Test_StandardImage(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	original := retrieveRemoteImage
+	original := RetrieveRemoteImage
 	defer func() {
-		retrieveRemoteImage = original
+		RetrieveRemoteImage = original
 	}()
-	mock := func(image string, opts *config.KanikoOptions) (v1.Image, error) {
+	mock := func(image string, opts *config.KanikoOptions, forceNoCache bool) (v1.Image, error) {
 		return nil, nil
 	}
-	retrieveRemoteImage = mock
+	RetrieveRemoteImage = mock
 	actual, err := RetrieveSourceImage(config.KanikoStage{
 		Stage: stages[0],
 	}, &config.KanikoOptions{})

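Promoting retrieveRemoteImage to the exported function variable RetrieveRemoteImage is what lets both build.go and this test swap or reuse it. A condensed sketch of that test seam, independent of the kaniko types:

package main

import "fmt"

// fetchImage is a package-level function variable, mirroring how
// RetrieveRemoteImage is declared above: production code calls through the
// variable, and tests can swap in a mock.
var fetchImage = func(ref string, forceNoCache bool) (string, error) {
	return "", fmt.Errorf("network disabled in this sketch: %s", ref)
}

func build(ref string) (string, error) {
	return fetchImage(ref, false)
}

func main() {
	// What the test above does: save the original, replace it, restore it.
	original := fetchImage
	defer func() { fetchImage = original }()

	fetchImage = func(ref string, forceNoCache bool) (string, error) {
		return "mock image for " + ref, nil
	}

	fmt.Println(build("gcr.io/example/base"))
}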