Merge branch 'master' into registry-mirror

This commit is contained in:
Tejal Desai 2019-12-09 15:40:56 -08:00 committed by GitHub
commit fbdb8f39c8
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 1108 additions and 74 deletions

191
.golangci.yaml Normal file
View File

@ -0,0 +1,191 @@
# This file contains all available configuration options
# with their default values.
# options for analysis running
run:
# default concurrency is the number of available CPUs
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 1m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
# include test files or not, default is true
tests: true
# list of build tags, all linters use it. Default is empty list.
build-tags:
# which dirs to skip: they won't be analyzed;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but next dirs are always skipped independently
# from this option's value:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs:
# which files to skip: they will be analyzed, but issues from them
# won't be reported. Default value is empty list, but there is
# no need to include all autogenerated files, we confidently recognize
# autogenerated files. If it's not please let us know.
skip-files:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
format: colored-line-number
# print lines of code with issue, default is true
print-issued-lines: true
# print linter name in the end of issue text, default is true
print-linter-name: true
# all available settings of specific linters
linters-settings:
errcheck:
# report about not checking errors in type assertions: `a := b.(MyStruct)`;
# default is false: such cases aren't reported by default.
check-type-assertions: false
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
# default is false: such cases aren't reported by default.
check-blank: false
govet:
# report about shadowed variables
#check-shadowing: true
# Obtain type information from installed (to $GOPATH/pkg) package files:
# golangci-lint will execute `go install -i` and `go test -i` for analyzed packages
# before analyzing them.
# By default this option is disabled and govet gets type information by loader from source code.
# Loading from source code is slow, but it's done only once for all linters.
# Go-installing of packages first time is much slower than loading them from source code,
# therefore this option is disabled by default.
# But repeated installation is fast in go >= 1.10 because of build caching.
# Enable this option only if all conditions are met:
# 1. you use only "fast" linters (--fast e.g.): no program loading occurs
# 2. you use go >= 1.10
# 3. you do repeated runs (false for CI) or cache $GOPATH/pkg or `go env GOCACHE` dir in CI.
#use-installed-packages: false
golint:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
gofmt:
# simplify code: gofmt with `-s` option, true by default
simplify: true
#gocyclo:
# # minimal code complexity to report, 30 by default (but we recommend 10-20)
# min-complexity: 10
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
#dupl:
# # tokens count to trigger issue, 150 by default
# threshold: 100
goconst:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
min-occurrences: 3
#depguard:
# list-type: blacklist
# include-go-root: false
# packages:
# - github.com/davecgh/go-spew/spew
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
locale: US
#lll:
# # max line length, lines longer will be reported. Default is 120.
# # '\t' is counted as 1 character by default, and can be changed with the tab-width option
# line-length: 120
# # tab width in spaces. Default to 1.
# tab-width: 1
unused:
# treat code as a program (not a library) and report unused exported identifiers; default is false.
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
# with golangci-lint call it on a directory with the changed file.
check-exported: false
unparam:
# call graph construction algorithm (cha, rta). In general, use cha for libraries,
# and rta for programs with main packages. Default is cha.
algo: cha
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
# with golangci-lint call it on a directory with the changed file.
check-exported: false
#nakedret:
# # make an issue if func has more lines of code than this setting and it has naked returns; default is 30
# max-func-lines: 30
#prealloc:
# # XXX: we don't recommend using this linter before doing performance profiling.
# # For most programs usage of prealloc will be a premature optimization.
# # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
# # True by default.
# simple: true
# range-loops: true # Report preallocation suggestions on range loops, true by default
# for-loops: false # Report preallocation suggestions on for loops, false by default
linters:
enable:
- goconst
- goimports
- golint
- interfacer
- maligned
- misspell
- unconvert
- unparam
enable-all: false
disable:
- errcheck
- gas
disable-all: false
presets:
- bugs
- unused
fast: false
issues:
# List of regexps of issue texts to exclude, empty list by default.
# But independently from this option we use default exclude patterns,
# it can be disabled by `exclude-use-default: false`. To list all
# excluded by default patterns execute `golangci-lint run --help`
exclude:
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all
# excluded by default patterns execute `golangci-lint run --help`.
# Default value for this option is true.
exclude-use-default: true
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 50
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same: 3
# Show only new issues: if there are unstaged changes or untracked files,
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
# It's a super-useful option for integration of golangci-lint into existing
# large codebase. It's not practical to fix all existing issues at the moment
# of integration: much better don't allow issues in new code.
# Default is false.
new: false
## Show only new issues created after git revision `REV`
#new-from-rev: REV
## Show only new issues created in git patch with set file path.
#new-from-patch: path/to/patch/file

View File

@ -46,33 +46,33 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME
- [Pushing to Docker Hub](#pushing-to-docker-hub)
- [Pushing to Amazon ECR](#pushing-to-amazon-ecr)
- [Additional Flags](#additional-flags)
- [--build-arg](#build-arg)
- [--cache](#cache)
- [--cache-dir](#cache-dir)
- [--cache-repo](#cache-repo)
- [--digest-file](#digest-file)
- [--oci-layout-path](#oci-layout-path)
- [--insecure-registry](#insecure-registry)
- [--skip-tls-verify-registry](#skip-tls-verify-registry)
- [--cleanup](#cleanup)
- [--insecure](#insecure)
- [--insecure-pull](#insecure-pull)
- [--no-push](#no-push)
- [--build-arg](#--build-arg)
- [--cache](#--cache)
- [--cache-dir](#--cache-dir)
- [--cache-repo](#--cache-repo)
- [--digest-file](#--digest-file)
- [--oci-layout-path](#--oci-layout-path)
- [--insecure-registry](#--insecure-registry)
- [--skip-tls-verify-registry](#--skip-tls-verify-registry)
- [--cleanup](#--cleanup)
- [--insecure](#--insecure)
- [--insecure-pull](#--insecure-pull)
- [--no-push](#--no-push)
- [--registry-mirror](#--registry-mirror)
- [--reproducible](#reproducible)
- [--single-snapshot](#single-snapshot)
- [--skip-tls-verify](#skip-tls-verify)
- [--skip-tls-verify-pull](#skip-tls-verify-pull)
- [--snapshotMode](#snapshotmode)
- [--target](#target)
- [--tarPath](#tarpath)
- [--verbosity](#verbosity)
- [Debug Image](#debug-image)
- [--reproducible](#--reproducible)
- [--single-snapshot](#--single-snapshot)
- [--skip-tls-verify](#--skip-tls-verify)
- [--skip-tls-verify-pull](#--skip-tls-verify-pull)
- [--snapshotMode](#--snapshotmode)
- [--target](#--target)
- [--tarPath](#--tarpath)
- [--verbosity](#--verbosity)
- [Debug Image](#debug-image)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)
- [Community](#community)
- [Limitations](#limitations)
- [mtime and snapshotting](#mtime-and-snapshotting)
- [mtime and snapshotting](#mtime-and-snapshotting)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -288,7 +288,7 @@ as a remote image destination:
### Caching
#### Caching Layers
kaniko currently can cache layers created by `RUN` commands in a remote repository.
kaniko can cache layers created by `RUN` commands in a remote repository.
Before executing a command, kaniko checks the cache for the layer.
If it exists, kaniko will pull and extract the cached layer instead of executing the command.
If not, kaniko will execute the command and then push the newly created layer to the cache.
@ -299,7 +299,7 @@ If this flag isn't provided, a cached repo will be inferred from the `--destinat
#### Caching Base Images
kaniko can cache images in a local directory that can be volume mounted into the kaniko image.
kaniko can cache images in a local directory that can be volume mounted into the kaniko pod.
To do so, the cache must first be populated, as it is read-only. We provide a kaniko cache warming
image at `gcr.io/kaniko-project/warmer`:
@ -310,7 +310,7 @@ docker run -v $(pwd):/workspace gcr.io/kaniko-project/warmer:latest --cache-dir=
`--image` can be specified for any number of desired images.
This command will cache those images by digest in a local directory named `cache`.
Once the cache is populated, caching is opted into with the same `--cache=true` flag as above.
The location of the local cache is provided via the `--cache-dir` flag, defaulting at `/cache` as with the cache warmer.
The location of the local cache is provided via the `--cache-dir` flag, defaulting to `/cache` as with the cache warmer.
See the `examples` directory for how to use with kubernetes clusters and persistent cache volumes.
### Pushing to Different Registries
@ -323,7 +323,7 @@ kaniko comes with support for GCR, Docker `config.json` and Amazon ECR, but conf
Get your docker registry user and password encoded in base64
echo USER:PASSWORD | base64
echo -n USER:PASSWORD | base64
Create a `config.json` file with your Docker registry url and the previous generated base64 string
@ -346,7 +346,7 @@ Run kaniko with the `config.json` inside `/kaniko/.docker/config.json`
The Amazon ECR [credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) is built in to the kaniko executor image.
To configure credentials, you will need to do the following:
1. Update the `credHelpers` section of [config.json](https://github.com/GoogleContainerTools/kaniko/blob/master/files/config.json) with the specific URI of your ECR registry:
1. Update the `credHelpers` section of [config.json](https://github.com/awslabs/amazon-ecr-credential-helper#configuration) with the specific URI of your ECR registry:
```json
{

View File

@ -23,15 +23,4 @@ if ! [ -x "$(command -v golangci-lint)" ]; then
${DIR}/install_golint.sh -b $GOPATH/bin v1.9.3
fi
golangci-lint run \
--no-config \
-E goconst \
-E goimports \
-E golint \
-E interfacer \
-E maligned \
-E misspell \
-E unconvert \
-E unparam \
-D errcheck \
-D gas
golangci-lint run

View File

@ -17,6 +17,7 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
@ -24,6 +25,12 @@ import (
"github.com/sirupsen/logrus"
)
var RootDir string
func init() {
RootDir = constants.RootDir
}
type CurrentCacheKey func() (string, error)
type DockerCommand interface {

View File

@ -171,7 +171,7 @@ type CachingCopyCommand struct {
func (cr *CachingCopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Infof("Found cached layer, extracting to filesystem")
var err error
cr.extractedFiles, err = util.GetFSFromImage(constants.RootDir, cr.img)
cr.extractedFiles, err = util.GetFSFromImage(RootDir, cr.img)
logrus.Infof("extractedFiles: %s", cr.extractedFiles)
if err != nil {
return errors.Wrap(err, "extracting fs from image")

View File

@ -203,6 +203,7 @@ func targetStage(stages []instructions.Stage, target string) (int, error) {
// resolveStages resolves any calls to previous stages with names to indices
// Ex. --from=second_stage should be --from=1 for easier processing later on
// As third party library lowers stage name in FROM instruction, this function resolves stage case insensitively.
func resolveStages(stages []instructions.Stage) {
nameToIndex := make(map[string]string)
for i, stage := range stages {
@ -214,7 +215,7 @@ func resolveStages(stages []instructions.Stage) {
switch c := cmd.(type) {
case *instructions.CopyCommand:
if c.From != "" {
if val, ok := nameToIndex[c.From]; ok {
if val, ok := nameToIndex[strings.ToLower(c.From)]; ok {
c.From = val
}

View File

@ -197,8 +197,14 @@ func Test_resolveStages(t *testing.T) {
FROM scratch AS second
COPY --from=0 /hi /hi2
FROM scratch
FROM scratch AS tHiRd
COPY --from=second /hi2 /hi3
COPY --from=1 /hi2 /hi3
FROM scratch
COPY --from=thIrD /hi3 /hi4
COPY --from=third /hi3 /hi4
COPY --from=2 /hi3 /hi4
`
stages, _, err := Parse([]byte(dockerfile))
if err != nil {
@ -209,11 +215,14 @@ func Test_resolveStages(t *testing.T) {
if index == 0 {
continue
}
copyCmd := stage.Commands[0].(*instructions.CopyCommand)
expectedStage := strconv.Itoa(index - 1)
if copyCmd.From != expectedStage {
t.Fatalf("unexpected copy command: %s resolved to stage %s, expected %s", copyCmd.String(), copyCmd.From, expectedStage)
for _, command := range stage.Commands {
copyCmd := command.(*instructions.CopyCommand)
if copyCmd.From != expectedStage {
t.Fatalf("unexpected copy command: %s resolved to stage %s, expected %s", copyCmd.String(), copyCmd.From, expectedStage)
}
}
}
}

View File

@ -23,7 +23,7 @@ import (
"strconv"
"time"
"github.com/otiai10/copy"
otiai10Cpy "github.com/otiai10/copy"
"github.com/google/go-containerregistry/pkg/v1/partial"
@ -52,12 +52,21 @@ import (
// This is the size of an empty tar in Go
const emptyTarSize = 1024
type cachePusher func(*config.KanikoOptions, string, string, string) error
type snapShotter interface {
Init() error
TakeSnapshotFS() (string, error)
TakeSnapshot([]string) (string, error)
}
// stageBuilder contains all fields necessary to build one stage of a Dockerfile
type stageBuilder struct {
stage config.KanikoStage
image v1.Image
cf *v1.ConfigFile
snapshotter *snapshot.Snapshotter
snapshotter snapShotter
layerCache cache.LayerCache
pushCache cachePusher
baseImageDigest string
finalCacheKey string
opts *config.KanikoOptions
@ -103,6 +112,10 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, cross
opts: opts,
crossStageDeps: crossStageDeps,
digestToCacheKeyMap: dcm,
layerCache: &cache.RegistryCache{
Opts: opts,
},
pushCache: pushLayerToCache,
}
for _, cmd := range s.stage.Commands {
@ -138,9 +151,6 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
return nil
}
layerCache := &cache.RegistryCache{
Opts: s.opts,
}
stopCache := false
// Possibly replace commands with their cached implementations.
// We walk through all the commands, running any commands that only operate on metadata.
@ -154,21 +164,21 @@ func (s *stageBuilder) optimize(compositeKey CompositeCache, cfg v1.Config) erro
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&cfg, s.args)
if err != nil {
return err
return errors.Wrap(err, "failed to get files used from context")
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return err
return errors.Wrap(err, "failed to add path to composite key")
}
}
ck, err := compositeKey.Hash()
if err != nil {
return err
return errors.Wrap(err, "failed to hash composite key")
}
s.finalCacheKey = ck
if command.ShouldCacheOutput() && !stopCache {
img, err := layerCache.RetrieveLayer(ck)
img, err := s.layerCache.RetrieveLayer(ck)
if err != nil {
logrus.Debugf("Failed to retrieve layer: %s", err)
logrus.Infof("No cached layer found for cmd %s", command.String())
@ -205,7 +215,7 @@ func (s *stageBuilder) build() error {
// Apply optimizations to the instructions.
if err := s.optimize(*compositeKey, s.cf.Config); err != nil {
return err
return errors.Wrap(err, "failed to optimize instructions")
}
// Unpack file system to root if we need to.
@ -224,14 +234,14 @@ func (s *stageBuilder) build() error {
if shouldUnpack {
t := timing.Start("FS Unpacking")
if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
return err
return errors.Wrap(err, "failed to get filesystem from image")
}
timing.DefaultRun.Stop(t)
} else {
logrus.Info("Skipping unpacking as no commands require it.")
}
if err := util.DetectFilesystemWhitelist(constants.WhitelistPath); err != nil {
return err
return errors.Wrap(err, "failed to check filesystem whitelist")
}
// Take initial snapshot
t := timing.Start("Initial FS snapshot")
@ -252,17 +262,17 @@ func (s *stageBuilder) build() error {
// If the command uses files from the context, add them.
files, err := command.FilesUsedFromContext(&s.cf.Config, s.args)
if err != nil {
return err
return errors.Wrap(err, "failed to get files used from context")
}
for _, f := range files {
if err := compositeKey.AddPath(f); err != nil {
return err
return errors.Wrap(err, fmt.Sprintf("failed to add path to composite key %v", f))
}
}
logrus.Info(command.String())
if err := command.ExecuteCommand(&s.cf.Config, s.args); err != nil {
return err
return errors.Wrap(err, "failed to execute command")
}
files = command.FilesToSnapshot()
timing.DefaultRun.Stop(t)
@ -273,21 +283,21 @@ func (s *stageBuilder) build() error {
tarPath, err := s.takeSnapshot(files)
if err != nil {
return err
return errors.Wrap(err, "failed to take snapshot")
}
ck, err := compositeKey.Hash()
if err != nil {
return err
return errors.Wrap(err, "failed to hash composite key")
}
// Push layer to cache (in parallel) now along with new config file
if s.opts.Cache && command.ShouldCacheOutput() {
cacheGroup.Go(func() error {
return pushLayerToCache(s.opts, ck, tarPath, command.String())
return s.pushCache(s.opts, ck, tarPath, command.String())
})
}
if err := s.saveSnapshotToImage(command.String(), tarPath); err != nil {
return err
return errors.Wrap(err, "failed to save snapshot to image")
}
}
if err := cacheGroup.Wait(); err != nil {
@ -343,7 +353,7 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) err
}
fi, err := os.Stat(tarPath)
if err != nil {
return err
return errors.Wrap(err, "tar file path does not exist")
}
if fi.Size() <= emptyTarSize {
logrus.Info("No files were changed, appending empty layer to config. No layer added to image.")
@ -505,7 +515,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
}
for _, p := range filesToSave {
logrus.Infof("Saving file %s for later use.", p)
copy.Copy(p, filepath.Join(dstDir, p))
otiai10Cpy.Copy(p, filepath.Join(dstDir, p))
}
// Delete the filesystem

View File

@ -17,6 +17,9 @@ limitations under the License.
package executor
import (
"archive/tar"
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -24,6 +27,7 @@ import (
"sort"
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/commands"
"github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
@ -32,6 +36,7 @@ import (
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/sirupsen/logrus"
)
func Test_reviewConfig(t *testing.T) {
@ -462,3 +467,455 @@ func TestInitializeConfig(t *testing.T) {
testutil.CheckDeepEqual(t, tt.expected, actual.Config)
}
}
// Test_stageBuilder_optimize verifies that stageBuilder.optimize completes
// without error for every combination of cache enabled/disabled and
// cache hit/miss (driven by fakeLayerCache.retrieve).
func Test_stageBuilder_optimize(t *testing.T) {
	cases := []struct {
		name     string
		opts     *config.KanikoOptions
		retrieve bool
	}{
		{name: "cache enabled and layer not present in cache", opts: &config.KanikoOptions{Cache: true}},
		{name: "cache enabled and layer present in cache", opts: &config.KanikoOptions{Cache: true}, retrieve: true},
		{name: "cache disabled and layer not present in cache", opts: &config.KanikoOptions{Cache: false}},
		{name: "cache disabled and layer present in cache", opts: &config.KanikoOptions{Cache: false}, retrieve: true},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			// A real temp file is needed so the command's context files exist.
			tmpFile, err := ioutil.TempFile("", "foo")
			if err != nil {
				t.Error(err)
			}
			cfgFile := &v1.ConfigFile{}
			builder := &stageBuilder{
				opts:        c.opts,
				cf:          cfgFile,
				snapshotter: fakeSnapShotter{},
				layerCache:  &fakeLayerCache{retrieve: c.retrieve},
			}
			builder.cmds = []commands.DockerCommand{
				MockDockerCommand{
					contextFiles: []string{tmpFile.Name()},
					cacheCommand: MockCachedDockerCommand{},
				},
			}
			if err := builder.optimize(CompositeCache{}, cfgFile.Config); err != nil {
				t.Errorf("Expected error to be nil but was %v", err)
			}
		})
	}
}
// Test_stageBuilder_build runs stageBuilder.build over a table of scenarios
// (mock command and real COPY commands, cache on/off, cache hit/miss) and
// asserts which cache keys were read from the layer cache and which were
// pushed via the pushCache hook.
func Test_stageBuilder_build(t *testing.T) {
	// testcase bundles the builder inputs plus the expected read/pushed keys.
	type testcase struct {
		description       string
		opts              *config.KanikoOptions
		layerCache        *fakeLayerCache
		expectedCacheKeys []string
		pushedCacheKeys   []string
		commands          []commands.DockerCommand
		fileName          string
		rootDir           string
		image             v1.Image
		config            *v1.ConfigFile
	}
	// Several cases are built by immediately-invoked closures because they
	// need temp files/dirs and precomputed composite-cache hashes.
	testCases := []testcase{
		func() testcase {
			dir, files := tempDirAndFile(t)
			file := files[0]
			filePath := filepath.Join(dir, file)
			// Precompute the cache key the builder is expected to derive.
			ch := NewCompositeCache("", "meow")
			ch.AddPath(filePath)
			hash, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			command := MockDockerCommand{
				contextFiles: []string{filePath},
				cacheCommand: MockCachedDockerCommand{
					contextFiles: []string{filePath},
				},
			}
			destDir, err := ioutil.TempDir("", "baz")
			if err != nil {
				t.Errorf("could not create temp dir %v", err)
			}
			// Cache miss: the key is both looked up and pushed.
			return testcase{
				description:       "fake command cache enabled but key not in cache",
				config:            &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
				opts:              &config.KanikoOptions{Cache: true},
				expectedCacheKeys: []string{hash},
				pushedCacheKeys:   []string{hash},
				commands:          []commands.DockerCommand{command},
				rootDir:           dir,
			}
		}(),
		func() testcase {
			dir, files := tempDirAndFile(t)
			file := files[0]
			filePath := filepath.Join(dir, file)
			ch := NewCompositeCache("", "meow")
			ch.AddPath(filePath)
			hash, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			command := MockDockerCommand{
				contextFiles: []string{filePath},
				cacheCommand: MockCachedDockerCommand{
					contextFiles: []string{filePath},
				},
			}
			destDir, err := ioutil.TempDir("", "baz")
			if err != nil {
				t.Errorf("could not create temp dir %v", err)
			}
			// Cache hit (retrieve: true): the key is read but nothing is pushed.
			return testcase{
				description: "fake command cache enabled and key in cache",
				opts:        &config.KanikoOptions{Cache: true},
				config:      &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
				layerCache: &fakeLayerCache{
					retrieve: true,
				},
				expectedCacheKeys: []string{hash},
				pushedCacheKeys:   []string{},
				commands:          []commands.DockerCommand{command},
				rootDir:           dir,
			}
		}(),
		// With caching disabled, no keys are read or pushed regardless of
		// whether the layer cache could serve a hit.
		{
			description: "fake command cache disabled and key not in cache",
			opts:        &config.KanikoOptions{Cache: false},
		},
		{
			description: "fake command cache disabled and key in cache",
			opts:        &config.KanikoOptions{Cache: false},
			layerCache: &fakeLayerCache{
				retrieve: true,
			},
		},
		func() testcase {
			dir, filenames := tempDirAndFile(t)
			filename := filenames[0]
			// NOTE(review): this local shadows the path/filepath package for
			// the rest of this closure.
			filepath := filepath.Join(dir, filename)
			tarContent := generateTar(t, dir, filename)
			ch := NewCompositeCache("", "")
			ch.AddPath(filepath)
			logrus.SetLevel(logrus.DebugLevel)
			hash, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			copyCommandCacheKey := hash
			// COPY with a cache hit: the cached layer tar is served by the
			// fake image so the CachingCopyCommand can extract it.
			return testcase{
				description: "copy command cache enabled and key in cache",
				opts:        &config.KanikoOptions{Cache: true},
				layerCache: &fakeLayerCache{
					retrieve: true,
					img: fakeImage{
						ImageLayers: []v1.Layer{
							fakeLayer{
								TarContent: tarContent,
							},
						},
					},
				},
				rootDir:           dir,
				expectedCacheKeys: []string{copyCommandCacheKey},
				// CachingCopyCommand is not pushed to the cache
				pushedCacheKeys: []string{},
				commands: getCommands(dir, []instructions.Command{
					&instructions.CopyCommand{
						SourcesAndDest: []string{
							filename, "foo.txt",
						},
					},
				}),
				fileName: filename,
			}
		}(),
		func() testcase {
			dir, filenames := tempDirAndFile(t)
			filename := filenames[0]
			tarContent := []byte{}
			destDir, err := ioutil.TempDir("", "baz")
			if err != nil {
				t.Errorf("could not create temp dir %v", err)
			}
			filePath := filepath.Join(dir, filename)
			ch := NewCompositeCache("", "")
			ch.AddPath(filePath)
			logrus.SetLevel(logrus.DebugLevel)
			hash, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			// COPY with a cache miss: the key is read and then pushed.
			return testcase{
				description: "copy command cache enabled and key is not in cache",
				opts:        &config.KanikoOptions{Cache: true},
				config:      &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
				layerCache:  &fakeLayerCache{},
				image: fakeImage{
					ImageLayers: []v1.Layer{
						fakeLayer{
							TarContent: tarContent,
						},
					},
				},
				rootDir:           dir,
				expectedCacheKeys: []string{hash},
				pushedCacheKeys:   []string{hash},
				commands: getCommands(dir, []instructions.Command{
					&instructions.CopyCommand{
						SourcesAndDest: []string{
							filename, "foo.txt",
						},
					},
				}),
				fileName: filename,
			}
		}(),
		func() testcase {
			dir, filenames := tempDirAndFile(t)
			filename := filenames[0]
			// NOTE(review): filename is passed as generateTar's dir argument
			// with no file names, so tarContent is an empty archive — confirm
			// `dir` wasn't intended here.
			tarContent := generateTar(t, filename)
			destDir, err := ioutil.TempDir("", "baz")
			if err != nil {
				t.Errorf("could not create temp dir %v", err)
			}
			filePath := filepath.Join(dir, filename)
			// hash1: read key for layer 1 (first COPY only).
			ch := NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
			ch.AddPath(filePath)
			logrus.SetLevel(logrus.DebugLevel)
			logrus.Infof("test composite key %v", ch)
			hash1, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			// hash2: read key for layer 2 (first COPY key extended by the second).
			ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
			ch.AddPath(filePath)
			logrus.Infof("test composite key %v", ch)
			hash2, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			// hash3: write key for layer 2, rebuilt from scratch with both COPY
			// keys but only one AddPath — intentionally differs from hash2.
			ch = NewCompositeCache("", fmt.Sprintf("COPY %s foo.txt", filename))
			ch.AddKey(fmt.Sprintf("COPY %s bar.txt", filename))
			ch.AddPath(filePath)
			logrus.Infof("test composite key %v", ch)
			hash3, err := ch.Hash()
			if err != nil {
				t.Errorf("couldn't create hash %v", err)
			}
			image := fakeImage{
				ImageLayers: []v1.Layer{
					fakeLayer{
						TarContent: tarContent,
					},
				},
			}
			// Parse a real two-COPY Dockerfile so genuine CopyCommands are used.
			dockerFile := fmt.Sprintf(`
FROM ubuntu:16.04
COPY %s foo.txt
COPY %s bar.txt
`, filename, filename)
			f, _ := ioutil.TempFile("", "")
			ioutil.WriteFile(f.Name(), []byte(dockerFile), 0755)
			opts := &config.KanikoOptions{
				DockerfilePath: f.Name(),
			}
			stages, err := dockerfile.Stages(opts)
			if err != nil {
				t.Errorf("could not parse test dockerfile")
			}
			stage := stages[0]
			cmds := stage.Commands
			return testcase{
				description: "cached copy command followed by uncached copy command result in different read and write hashes",
				opts:        &config.KanikoOptions{Cache: true},
				rootDir:     dir,
				config:      &v1.ConfigFile{Config: v1.Config{WorkingDir: destDir}},
				layerCache: &fakeLayerCache{
					keySequence: []string{hash1},
					img:         image,
				},
				image: image,
				// hash1 is the read cachekey for the first layer
				// hash2 is the read cachekey for the second layer
				expectedCacheKeys: []string{hash1, hash2},
				// Due to CachingCopyCommand and CopyCommand returning different values the write cache key for the second copy command will never match the read cache key
				// hash3 is the cachekey used to write to the cache for layer 2
				pushedCacheKeys: []string{hash3},
				commands:        getCommands(dir, cmds),
			}
		}(),
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			var fileName string
			// Cases without explicit commands fall back to a single mock
			// command backed by a fresh temp file.
			if tc.commands == nil {
				file, err := ioutil.TempFile("", "foo")
				if err != nil {
					t.Error(err)
				}
				command := MockDockerCommand{
					contextFiles: []string{file.Name()},
					cacheCommand: MockCachedDockerCommand{
						contextFiles: []string{file.Name()},
					},
				}
				tc.commands = []commands.DockerCommand{command}
				fileName = file.Name()
			} else {
				fileName = tc.fileName
			}
			cf := tc.config
			if cf == nil {
				cf = &v1.ConfigFile{
					Config: v1.Config{
						Env: make([]string, 0),
					},
				}
			}
			snap := fakeSnapShotter{file: fileName}
			lc := tc.layerCache
			if lc == nil {
				lc = &fakeLayerCache{}
			}
			// keys records every cache key the builder attempts to push.
			keys := []string{}
			sb := &stageBuilder{
				args:        &dockerfile.BuildArgs{}, //required or code will panic
				image:       tc.image,
				opts:        tc.opts,
				cf:          cf,
				snapshotter: snap,
				layerCache:  lc,
				pushCache: func(_ *config.KanikoOptions, cacheKey, _, _ string) error {
					keys = append(keys, cacheKey)
					return nil
				},
			}
			sb.cmds = tc.commands
			// Temporarily redirect the package-level RootDir so commands
			// operate on the test's temp dir; restored after build.
			tmp := commands.RootDir
			if tc.rootDir != "" {
				commands.RootDir = tc.rootDir
			}
			err := sb.build()
			if err != nil {
				t.Errorf("Expected error to be nil but was %v", err)
			}
			assertCacheKeys(t, tc.expectedCacheKeys, lc.receivedKeys, "receive")
			assertCacheKeys(t, tc.pushedCacheKeys, keys, "push")
			commands.RootDir = tmp
		})
	}
}
// assertCacheKeys fails the test when the expected and actual cache-key sets
// differ. Order is ignored: both slices are sorted (descending, mutating the
// caller's slices as before) prior to the element-wise comparison.
// description names the operation being checked ("receive" or "push") and is
// interpolated into failure messages.
func assertCacheKeys(t *testing.T, expectedCacheKeys, actualCacheKeys []string, description string) {
	if len(expectedCacheKeys) != len(actualCacheKeys) {
		t.Errorf("expected to %v %v keys but was %v", description, len(expectedCacheKeys), len(actualCacheKeys))
		// Bail out: the index loop below would panic on actualCacheKeys[i]
		// whenever actualCacheKeys is shorter than expectedCacheKeys.
		return
	}
	sort.Slice(expectedCacheKeys, func(x, y int) bool {
		return expectedCacheKeys[x] > expectedCacheKeys[y]
	})
	sort.Slice(actualCacheKeys, func(x, y int) bool {
		return actualCacheKeys[x] > actualCacheKeys[y]
	})
	for i, key := range expectedCacheKeys {
		if key != actualCacheKeys[i] {
			t.Errorf("expected to %v keys %d to be %v but was %v %v", description, i, key, actualCacheKeys[i], actualCacheKeys)
		}
	}
}
// getCommands converts parsed Dockerfile instructions into their concrete
// DockerCommand implementations rooted at dir. It panics on conversion
// failure, which is acceptable for a test helper.
func getCommands(dir string, cmds []instructions.Command) []commands.DockerCommand {
	result := make([]commands.DockerCommand, 0)
	for _, instruction := range cmds {
		dockerCmd, err := commands.GetCommand(instruction, dir)
		if err != nil {
			panic(err)
		}
		result = append(result, dockerCmd)
	}
	return result
}
// tempDirAndFile creates a temporary directory containing a single file
// "bar.txt" with the contents "meow" (mode 0777), returning the directory
// path and the list of created file names.
func tempDirAndFile(t *testing.T) (string, []string) {
	names := []string{"bar.txt"}
	tmpDir, err := ioutil.TempDir("", "foo")
	if err != nil {
		t.Errorf("could not create temp dir %v", err)
	}
	for _, name := range names {
		target := filepath.Join(tmpDir, name)
		if err := ioutil.WriteFile(target, []byte(`meow`), 0777); err != nil {
			t.Errorf("could not create temp file %v", err)
		}
	}
	return tmpDir, names
}
// generateTar builds an in-memory tar archive from the named files inside
// dir and returns the bytes written so far.
// NOTE(review): Close is deferred, so it runs after the return value has been
// evaluated; the tar footer is therefore not part of the returned slice —
// confirm this is intentional.
func generateTar(t *testing.T, dir string, fileNames ...string) []byte {
	var archive bytes.Buffer
	tw := tar.NewWriter(&archive)
	defer tw.Close()
	for _, name := range fileNames {
		fullPath := filepath.Join(dir, name)
		info, err := os.Stat(fullPath)
		if err != nil {
			t.Errorf("could not get file info for temp file %v", err)
		}
		header, err := tar.FileInfoHeader(info, name)
		if err != nil {
			t.Errorf("could not get tar header for temp file %v", err)
		}
		if err := tw.WriteHeader(header); err != nil {
			t.Errorf("could not write tar header %v", err)
		}
		data, err := ioutil.ReadFile(fullPath)
		if err != nil {
			t.Errorf("could not read tempfile %v", err)
		}
		if _, err := tw.Write(data); err != nil {
			t.Errorf("could not write file contents to tar")
		}
	}
	return archive.Bytes()
}

View File

@ -0,0 +1,137 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package executor
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
)
// Test_NewCompositeCache verifies the constructor returns the expected
// concrete pointer type.
func Test_NewCompositeCache(t *testing.T) {
	if got := reflect.TypeOf(NewCompositeCache()).String(); got != "*executor.CompositeCache" {
		t.Errorf("expected return to be *executor.CompositeCache but was %v", got)
	}
}
// Test_CompositeCache_AddKey verifies that AddKey appends every supplied
// key to the cache's key list.
func Test_CompositeCache_AddKey(t *testing.T) {
	cache := NewCompositeCache()
	cache.AddKey("meow", "purr")
	if got := len(cache.keys); got != 2 {
		t.Errorf("expected keys to have length 2 but was %v", got)
	}
}
// Test_CompositeCache_Key verifies that Key joins the cache keys with a
// hyphen separator.
func Test_CompositeCache_Key(t *testing.T) {
	cache := NewCompositeCache("meow", "purr")
	if got := cache.Key(); got != "meow-purr" {
		t.Errorf("expected result to equal meow-purr but was %v", got)
	}
}
// Test_CompositeCache_Hash pins the hash of a fixed key set so that any
// change to the hashing scheme is caught.
func Test_CompositeCache_Hash(t *testing.T) {
	const expectedHash = "b4fd5a11af812a11a79d794007c842794cc668c8e7ebaba6d1e6d021b8e06c71"
	cache := NewCompositeCache("meow", "purr")
	got, err := cache.Hash()
	if err != nil {
		t.Errorf("expected error to be nil but was %v", err)
	}
	if got != expectedHash {
		t.Errorf("expected result to equal %v but was %v", expectedHash, got)
	}
}
// Test_CompositeCache_AddPath_dir checks that hashing a directory added
// via AddPath is deterministic: two caches built from the same directory
// must produce identical hashes.
func Test_CompositeCache_AddPath_dir(t *testing.T) {
	tmpDir, err := ioutil.TempDir("/tmp", "foo")
	if err != nil {
		t.Fatalf("got error setting up test %v", err)
	}
	// The original leaked the temp directory; clean it up.
	defer os.RemoveAll(tmpDir)
	content := `meow meow meow`
	if err := ioutil.WriteFile(filepath.Join(tmpDir, "foo.txt"), []byte(content), 0777); err != nil {
		t.Fatalf("got error writing temp file %v", err)
	}
	// fn builds a fresh cache over tmpDir and returns its hash.
	fn := func() string {
		r := NewCompositeCache()
		if err := r.AddPath(tmpDir); err != nil {
			t.Errorf("expected error to be nil but was %v", err)
		}
		if len(r.keys) != 1 {
			t.Errorf("expected len of keys to be 1 but was %v", len(r.keys))
		}
		hash, err := r.Hash()
		if err != nil {
			t.Errorf("couldnt generate hash from test cache")
		}
		return hash
	}
	hash1 := fn()
	hash2 := fn()
	if hash1 != hash2 {
		t.Errorf("expected hash %v to equal hash %v", hash1, hash2)
	}
}
// Test_CompositeCache_AddPath_file checks that hashing a single file added
// via AddPath is deterministic: two caches built from the same file must
// produce identical hashes.
func Test_CompositeCache_AddPath_file(t *testing.T) {
	tmpfile, err := ioutil.TempFile("/tmp", "foo.txt")
	if err != nil {
		t.Errorf("got error setting up test %v", err)
	}
	name := tmpfile.Name()
	defer os.Remove(name) // clean up
	if _, err := tmpfile.Write([]byte(`meow meow meow`)); err != nil {
		t.Errorf("got error writing temp file %v", err)
	}
	if err := tmpfile.Close(); err != nil {
		t.Errorf("got error closing temp file %v", err)
	}
	// buildHash constructs a fresh cache over the file and returns its hash.
	buildHash := func() string {
		cache := NewCompositeCache()
		if err := cache.AddPath(name); err != nil {
			t.Errorf("expected error to be nil but was %v", err)
		}
		if len(cache.keys) != 1 {
			t.Errorf("expected len of keys to be 1 but was %v", len(cache.keys))
		}
		h, err := cache.Hash()
		if err != nil {
			t.Errorf("couldnt generate hash from test cache")
		}
		return h
	}
	first := buildHash()
	second := buildHash()
	if first != second {
		t.Errorf("expected hash %v to equal hash %v", first, second)
	}
}

184
pkg/executor/fakes.go Normal file
View File

@ -0,0 +1,184 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// for use in tests
package executor
import (
"bytes"
"errors"
"io"
"io/ioutil"
"github.com/GoogleContainerTools/kaniko/pkg/commands"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
)
// fakeSnapShotter is a test double for the executor's snapshotter. Init is
// a no-op and both snapshot methods return the configured tarPath with no
// error.
type fakeSnapShotter struct {
	file    string // not read by any method visible here; presumably set by tests — TODO confirm
	tarPath string // canned path returned by TakeSnapshot/TakeSnapshotFS
}

// Init is a no-op; the fake needs no initialization.
func (f fakeSnapShotter) Init() error { return nil }

// TakeSnapshotFS pretends to snapshot the full filesystem and returns the
// canned tar path.
func (f fakeSnapShotter) TakeSnapshotFS() (string, error) {
	return f.tarPath, nil
}

// TakeSnapshot pretends to snapshot the given files (ignored) and returns
// the canned tar path.
func (f fakeSnapShotter) TakeSnapshot(_ []string) (string, error) {
	return f.tarPath, nil
}
// MockDockerCommand is a test double implementing commands.DockerCommand.
// It reports a fixed set of context files and delegates CacheCommand to a
// preconfigured cached command.
type MockDockerCommand struct {
	contextFiles []string               // returned by FilesUsedFromContext
	cacheCommand commands.DockerCommand // returned by CacheCommand
}

// ExecuteCommand is a no-op that always succeeds.
func (m MockDockerCommand) ExecuteCommand(c *v1.Config, args *dockerfile.BuildArgs) error { return nil }

// String returns a fixed identifier used in test expectations.
func (m MockDockerCommand) String() string {
	return "meow"
}

// FilesToSnapshot returns a canned list marking the uncached code path.
func (m MockDockerCommand) FilesToSnapshot() []string {
	return []string{"meow-snapshot-no-cache"}
}

// CacheCommand returns the preconfigured cached variant of this command;
// the image argument is ignored.
func (m MockDockerCommand) CacheCommand(image v1.Image) commands.DockerCommand {
	return m.cacheCommand
}

// FilesUsedFromContext returns the configured context files.
func (m MockDockerCommand) FilesUsedFromContext(c *v1.Config, args *dockerfile.BuildArgs) ([]string, error) {
	return m.contextFiles, nil
}

// MetadataOnly reports false, so the command counts as producing
// filesystem changes.
func (m MockDockerCommand) MetadataOnly() bool {
	return false
}

// RequiresUnpackedFS reports false; the fake needs no unpacked filesystem.
func (m MockDockerCommand) RequiresUnpackedFS() bool {
	return false
}

// ShouldCacheOutput reports true, so tests exercise the cache-write path.
func (m MockDockerCommand) ShouldCacheOutput() bool {
	return true
}
// MockCachedDockerCommand is the cached counterpart to MockDockerCommand:
// a test double implementing commands.DockerCommand whose output is not
// re-cached.
type MockCachedDockerCommand struct {
	contextFiles []string // returned by FilesUsedFromContext
}

// ExecuteCommand is a no-op that always succeeds.
func (m MockCachedDockerCommand) ExecuteCommand(c *v1.Config, args *dockerfile.BuildArgs) error {
	return nil
}

// String returns a fixed identifier used in test expectations.
func (m MockCachedDockerCommand) String() string {
	return "meow"
}

// FilesToSnapshot returns a canned list marking the cached code path.
func (m MockCachedDockerCommand) FilesToSnapshot() []string {
	return []string{"meow-snapshot"}
}

// CacheCommand returns nil: a cached command has no further cached form.
func (m MockCachedDockerCommand) CacheCommand(image v1.Image) commands.DockerCommand {
	return nil
}

// FilesUsedFromContext returns the configured context files.
func (m MockCachedDockerCommand) FilesUsedFromContext(c *v1.Config, args *dockerfile.BuildArgs) ([]string, error) {
	return m.contextFiles, nil
}

// MetadataOnly reports false, so the command counts as producing
// filesystem changes.
func (m MockCachedDockerCommand) MetadataOnly() bool {
	return false
}

// RequiresUnpackedFS reports false; the fake needs no unpacked filesystem.
func (m MockCachedDockerCommand) RequiresUnpackedFS() bool {
	return false
}

// ShouldCacheOutput reports false: an already-cached command's output is
// not written back to the cache.
func (m MockCachedDockerCommand) ShouldCacheOutput() bool {
	return false
}
// fakeLayerCache is a test double for the layer cache. It records every
// key it is asked for and serves hits either according to a fixed
// retrieve flag or by consuming an expected sequence of keys.
type fakeLayerCache struct {
	retrieve     bool     // used only when keySequence is empty: true = always hit, false = always miss
	receivedKeys []string // every key passed to RetrieveLayer, in call order
	img          v1.Image // canned image returned on a cache hit
	keySequence  []string // when non-empty, only a request matching the head hits (and pops it)
}

// RetrieveLayer records the requested key. While keySequence is non-empty,
// only a request matching its head element is a hit (consuming that
// element); any other key misses. Once the sequence is exhausted (or was
// never set), the retrieve flag decides hit vs. miss.
func (f *fakeLayerCache) RetrieveLayer(key string) (v1.Image, error) {
	f.receivedKeys = append(f.receivedKeys, key)
	if len(f.keySequence) > 0 {
		if f.keySequence[0] == key {
			f.keySequence = f.keySequence[1:]
			return f.img, nil
		}
		// Note: the image is returned alongside the error here, unlike the
		// retrieve-flag miss below which returns nil.
		return f.img, errors.New("could not find layer")
	}
	if !f.retrieve {
		return nil, errors.New("could not find layer")
	}
	return f.img, nil
}
// fakeLayer is a minimal v1.Layer implementation whose uncompressed
// content is the supplied tar bytes; every other accessor returns a zero
// value.
type fakeLayer struct {
	TarContent []byte // served verbatim by Uncompressed
}

// Digest returns a zero hash.
func (f fakeLayer) Digest() (v1.Hash, error) {
	return v1.Hash{}, nil
}

// DiffID returns a zero hash.
func (f fakeLayer) DiffID() (v1.Hash, error) {
	return v1.Hash{}, nil
}

// Compressed returns nil; tests only consume the uncompressed stream.
func (f fakeLayer) Compressed() (io.ReadCloser, error) {
	return nil, nil
}

// Uncompressed streams the configured tar content.
func (f fakeLayer) Uncompressed() (io.ReadCloser, error) {
	return ioutil.NopCloser(bytes.NewReader(f.TarContent)), nil
}

// Size reports zero regardless of TarContent length.
func (f fakeLayer) Size() (int64, error) {
	return 0, nil
}

// MediaType returns an empty media type.
func (f fakeLayer) MediaType() (types.MediaType, error) {
	return "", nil
}
// fakeImage is a minimal v1.Image implementation backed by a fixed slice
// of layers; every other accessor returns an empty or zero value.
type fakeImage struct {
	ImageLayers []v1.Layer // returned verbatim by Layers
}

// Layers returns the configured layer slice.
func (f fakeImage) Layers() ([]v1.Layer, error) {
	return f.ImageLayers, nil
}

// MediaType returns an empty media type.
func (f fakeImage) MediaType() (types.MediaType, error) {
	return "", nil
}

// Size reports zero.
func (f fakeImage) Size() (int64, error) {
	return 0, nil
}

// ConfigName returns a zero hash.
func (f fakeImage) ConfigName() (v1.Hash, error) {
	return v1.Hash{}, nil
}

// ConfigFile returns an empty config file.
func (f fakeImage) ConfigFile() (*v1.ConfigFile, error) {
	return &v1.ConfigFile{}, nil
}

// RawConfigFile returns an empty byte slice.
func (f fakeImage) RawConfigFile() ([]byte, error) {
	return []byte{}, nil
}

// Digest returns a zero hash.
func (f fakeImage) Digest() (v1.Hash, error) {
	return v1.Hash{}, nil
}

// Manifest returns an empty manifest.
func (f fakeImage) Manifest() (*v1.Manifest, error) {
	return &v1.Manifest{}, nil
}

// RawManifest returns an empty byte slice.
func (f fakeImage) RawManifest() ([]byte, error) {
	return []byte{}, nil
}

// LayerByDigest returns an empty fakeLayer regardless of the hash.
func (f fakeImage) LayerByDigest(v1.Hash) (v1.Layer, error) {
	return fakeLayer{}, nil
}

// LayerByDiffID returns an empty fakeLayer regardless of the hash.
func (f fakeImage) LayerByDiffID(v1.Hash) (v1.Layer, error) {
	return fakeLayer{}, nil
}

View File

@ -151,7 +151,7 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
if opts.ImageNameDigestFile != "" {
err := ioutil.WriteFile(opts.ImageNameDigestFile, []byte(builder.String()), 0644)
if err != nil {
return errors.Wrap(err, "writing digest to file failed")
return errors.Wrap(err, "writing image name with digest to file failed")
}
}

View File

@ -176,7 +176,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
// Only add changed files.
fileChanged, err := s.l.CheckFileChange(path)
if err != nil {
return nil, nil, err
return nil, nil, fmt.Errorf("could not check if file has changed %s %s", path, err)
}
if fileChanged {
logrus.Debugf("Adding %s to layer, because it was changed.", path)

View File

@ -17,6 +17,7 @@ limitations under the License.
package util
import (
"fmt"
"net/http"
"net/url"
"os"
@ -25,7 +26,7 @@ import (
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
@ -77,13 +78,16 @@ func ResolveEnvAndWildcards(sd instructions.SourcesAndDest, buildcontext string,
// First, resolve any environment replacement
resolvedEnvs, err := ResolveEnvironmentReplacementList(sd, envs, true)
if err != nil {
return nil, "", err
return nil, "", errors.Wrap(err, "failed to resolve environment")
}
if len(resolvedEnvs) == 0 {
return nil, "", errors.New("resolved envs is empty")
}
dest := resolvedEnvs[len(resolvedEnvs)-1]
// Resolve wildcards and get a list of resolved sources
srcs, err := ResolveSources(resolvedEnvs[0:len(resolvedEnvs)-1], buildcontext)
if err != nil {
return nil, "", err
return nil, "", errors.Wrap(err, "failed to resolve sources")
}
err = IsSrcsValid(sd, srcs, buildcontext)
return srcs, dest, err
@ -219,9 +223,10 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri
if IsSrcRemoteFileURL(resolvedSources[0]) {
return nil
}
fi, err := os.Lstat(filepath.Join(root, resolvedSources[0]))
path := filepath.Join(root, resolvedSources[0])
fi, err := os.Lstat(path)
if err != nil {
return err
return errors.Wrap(err, fmt.Sprintf("failed to get fileinfo for %v", path))
}
if fi.IsDir() {
return nil
@ -237,7 +242,7 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri
src = filepath.Clean(src)
files, err := RelativeFiles(src, root)
if err != nil {
return err
return errors.Wrap(err, "failed to get relative files")
}
for _, file := range files {
if excludeFile(file, root) {

View File

@ -551,6 +551,12 @@ func CopyFile(src, dest, buildcontext string) (bool, error) {
logrus.Debugf("%s found in .dockerignore, ignoring", src)
return true, nil
}
if src == dest {
// This is a no-op. Move on, but don't list it as ignored.
// We have to make sure we do this so we don't overwrite our own file.
// See iusse #904 for an example.
return false, nil
}
fi, err := os.Stat(src)
if err != nil {
return false, err

View File

@ -825,3 +825,41 @@ func Test_correctDockerignoreFileIsUsed(t *testing.T) {
}
}
}
// Test_CopyFile_skips_self ensures CopyFile treats copying a file onto
// itself as a successful no-op — not reported as "ignored" — and leaves
// the file contents intact. Regression test for issue #904.
func Test_CopyFile_skips_self(t *testing.T) {
	t.Parallel()
	tempDir, err := ioutil.TempDir("", "kaniko_test")
	if err != nil {
		t.Fatal(err)
	}
	// The original leaked the temp directory; clean it up.
	defer os.RemoveAll(tempDir)
	tempFile := filepath.Join(tempDir, "foo")
	expected := "bar"
	if err := ioutil.WriteFile(
		tempFile,
		[]byte(expected),
		0755,
	); err != nil {
		t.Fatal(err)
	}
	ignored, err := CopyFile(tempFile, tempFile, "")
	if err != nil {
		t.Fatal(err)
	}
	if ignored {
		t.Fatal("expected file to NOT be ignored")
	}
	// Ensure file has expected contents
	actualData, err := ioutil.ReadFile(tempFile)
	if err != nil {
		t.Fatal(err)
	}
	if actual := string(actualData); actual != expected {
		t.Fatalf("expected file contents to be %q, but got %q", expected, actual)
	}
}