Merge pull request #353 from priyawadhwa/cache
Add layer caching to kaniko
Commit: 57ede49dac
@@ -95,6 +95,8 @@ func addKanikoOptionsFlags(cmd *cobra.Command) {
 	RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible")
 	RootCmd.PersistentFlags().StringVarP(&opts.Target, "target", "", "", "Set the target build stage to build")
 	RootCmd.PersistentFlags().BoolVarP(&opts.NoPush, "no-push", "", false, "Do not push the image to the registry")
+	RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided")
+	RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image")
 }
 
 // addHiddenFlags marks certain flags as hidden from the executor help text
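For illustration (not part of this commit): with the two flags above wired into the executor, enabling the layer cache from the command line might look like the sketch below, which mirrors the docker-run wrapper used by the integration tests later in this change. The executor image, volume mounts, context, destination, and cache repository are placeholders; only the flag names (-f, -d, -c, --cache, --cache-repo) come from the diff.

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Hypothetical invocation of the kaniko executor image with the new cache flags.
	cmd := exec.Command("docker", "run",
		"-v", os.Getenv("HOME")+"/.config/gcloud:/root/.config/gcloud",
		"-v", "/path/to/build/context:/workspace",
		"gcr.io/kaniko-project/executor:latest",
		"-f", "/workspace/Dockerfile",
		"-d", "gcr.io/my-project/my-image:latest",
		"-c", "/workspace",
		"--cache=true",
		"--cache-repo", "gcr.io/my-project/kaniko-cache",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	_ = cmd.Run()
}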
@@ -0,0 +1,22 @@
+# Copyright 2018 Google, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Test to make sure the cache works properly
+# If the image is built twice, /date should be the same in both images
+# if the cache is implemented correctly
+
+FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
+RUN date > /date
+COPY context/foo /foo
+RUN echo hey
@@ -0,0 +1,20 @@
+# Copyright 2018 Google, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Test to make sure the cache works properly
+# /date should be the same regardless of when this image is built
+# if the cache is implemented correctly
+
+FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
+RUN apt-get update && apt-get install -y make
@@ -23,6 +23,7 @@ import (
 	"path"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 )
@@ -77,12 +78,16 @@ func GetKanikoImage(imageRepo, dockerfile string) string {
 	return strings.ToLower(imageRepo + kanikoPrefix + dockerfile)
 }
 
+// GetVersionedKanikoImage constructs the name of the kaniko image that would be built
+// with the dockerfile and versions it for cache testing
+func GetVersionedKanikoImage(imageRepo, dockerfile string, version int) string {
+	return strings.ToLower(imageRepo + kanikoPrefix + dockerfile + strconv.Itoa(version))
+}
+
 // FindDockerFiles will look for test docker files in the directory dockerfilesPath.
 // These files must start with `Dockerfile_test`. If the file is one we are intentionally
 // skipping, it will not be included in the returned list.
 func FindDockerFiles(dockerfilesPath string) ([]string, error) {
-	// TODO: remove test_user_run from this when https://github.com/GoogleContainerTools/container-diff/issues/237 is fixed
-	testsToIgnore := map[string]bool{"Dockerfile_test_user_run": true}
 	allDockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile_test*"))
 	if err != nil {
 		return []string{}, fmt.Errorf("Failed to find docker files at %s: %s", dockerfilesPath, err)
@@ -92,9 +97,8 @@ func FindDockerFiles(dockerfilesPath string) ([]string, error) {
 	for _, dockerfile := range allDockerfiles {
 		// Remove the leading directory from the path
 		dockerfile = dockerfile[len("dockerfiles/"):]
-		if !testsToIgnore[dockerfile] {
-			dockerfiles = append(dockerfiles, dockerfile)
-		}
+		dockerfiles = append(dockerfiles, dockerfile)
 	}
 	return dockerfiles, err
 }
@@ -104,6 +108,8 @@ func FindDockerFiles(dockerfilesPath string) ([]string, error) {
 type DockerFileBuilder struct {
 	// Holds all available docker files and whether or not they've been built
 	FilesBuilt map[string]bool
+	DockerfilesToIgnore map[string]struct{}
+	TestCacheDockerfiles map[string]struct{}
 }
 
 // NewDockerFileBuilder will create a DockerFileBuilder initialized with dockerfiles, which
@@ -113,6 +119,14 @@ func NewDockerFileBuilder(dockerfiles []string) *DockerFileBuilder {
 	for _, f := range dockerfiles {
 		d.FilesBuilt[f] = false
 	}
+	d.DockerfilesToIgnore = map[string]struct{}{
+		// TODO: remove test_user_run from this when https://github.com/GoogleContainerTools/container-diff/issues/237 is fixed
+		"Dockerfile_test_user_run": {},
+	}
+	d.TestCacheDockerfiles = map[string]struct{}{
+		"Dockerfile_test_cache":         {},
+		"Dockerfile_test_cache_install": {},
+	}
 	return &d
 }
@@ -186,3 +200,31 @@ func (d *DockerFileBuilder) BuildImage(imageRepo, gcsBucket, dockerfilesPath, do
 	d.FilesBuilt[dockerfile] = true
 	return nil
 }
+
+// buildCachedImages builds the images for testing caching via kaniko where version is the nth time this image has been built
+func (d *DockerFileBuilder) buildCachedImages(imageRepo, cacheRepo, dockerfilesPath string, version int) error {
+	_, ex, _, _ := runtime.Caller(0)
+	cwd := filepath.Dir(ex)
+
+	cacheFlag := "--cache=true"
+
+	for dockerfile := range d.TestCacheDockerfiles {
+		kanikoImage := GetVersionedKanikoImage(imageRepo, dockerfile, version)
+		kanikoCmd := exec.Command("docker",
+			append([]string{"run",
+				"-v", os.Getenv("HOME") + "/.config/gcloud:/root/.config/gcloud",
+				"-v", cwd + ":/workspace",
+				ExecutorImage,
+				"-f", path.Join(buildContextPath, dockerfilesPath, dockerfile),
+				"-d", kanikoImage,
+				"-c", buildContextPath,
+				cacheFlag,
+				"--cache-repo", cacheRepo})...,
+		)
+
+		if _, err := RunCommandWithoutTest(kanikoCmd); err != nil {
+			return fmt.Errorf("Failed to build cached image %s with kaniko command \"%s\": %s", kanikoImage, kanikoCmd.Args, err)
+		}
+	}
+	return nil
+}
@@ -24,8 +24,10 @@ import (
 	"math"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"strings"
 	"testing"
+	"time"
 
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1/daemon"
@@ -148,6 +150,7 @@ func TestMain(m *testing.M) {
 		fmt.Printf("error building onbuild base: %v", err)
 		os.Exit(1)
 	}
+
 	pushOnbuildBase := exec.Command("docker", "push", config.onbuildBaseImage)
 	if err := pushOnbuildBase.Run(); err != nil {
 		fmt.Printf("error pushing onbuild base %s: %v", config.onbuildBaseImage, err)
@@ -165,7 +168,6 @@ func TestMain(m *testing.M) {
 		fmt.Printf("error pushing hardlink base %s: %v", config.hardlinkBaseImage, err)
 		os.Exit(1)
 	}
-
 	dockerfiles, err := FindDockerFiles(dockerfilesPath)
 	if err != nil {
 		fmt.Printf("Coudn't create map of dockerfiles: %s", err)
@@ -177,6 +179,12 @@ func TestMain(m *testing.M) {
 func TestRun(t *testing.T) {
 	for dockerfile, built := range imageBuilder.FilesBuilt {
 		t.Run("test_"+dockerfile, func(t *testing.T) {
+			if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
+				t.SkipNow()
+			}
+			if _, ok := imageBuilder.TestCacheDockerfiles[dockerfile]; ok {
+				t.SkipNow()
+			}
 			if !built {
 				err := imageBuilder.BuildImage(config.imageRepo, config.gcsBucket, dockerfilesPath, dockerfile)
 				if err != nil {
@@ -195,25 +203,8 @@ func TestRun(t *testing.T) {
 			t.Logf("diff = %s", string(diff))
 
 			expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
-
-			// Let's compare the json objects themselves instead of strings to avoid
-			// issues with spaces and indents
-			var diffInt interface{}
-			var expectedInt interface{}
-
-			err := json.Unmarshal(diff, &diffInt)
-			if err != nil {
-				t.Error(err)
-				t.Fail()
-			}
-
-			err = json.Unmarshal([]byte(expected), &expectedInt)
-			if err != nil {
-				t.Error(err)
-				t.Fail()
-			}
-
-			testutil.CheckErrorAndDeepEqual(t, false, nil, expectedInt, diffInt)
+			checkContainerDiffOutput(t, diff, expected)
 		})
 	}
 }
@@ -228,6 +219,9 @@ func TestLayers(t *testing.T) {
 	}
 	for dockerfile, built := range imageBuilder.FilesBuilt {
 		t.Run("test_layer_"+dockerfile, func(t *testing.T) {
+			if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
+				t.SkipNow()
+			}
 			if !built {
 				err := imageBuilder.BuildImage(config.imageRepo, config.gcsBucket, dockerfilesPath, dockerfile)
 				if err != nil {
@@ -244,6 +238,58 @@ func TestLayers(t *testing.T) {
 	}
 }
 
+// Build each image with kaniko twice, and then make sure they're exactly the same
+func TestCache(t *testing.T) {
+	for dockerfile := range imageBuilder.TestCacheDockerfiles {
+		t.Run("test_cache_"+dockerfile, func(t *testing.T) {
+			cache := filepath.Join(config.imageRepo, "cache", fmt.Sprintf("%v", time.Now().UnixNano()))
+			// Build the initial image which will cache layers
+			if err := imageBuilder.buildCachedImages(config.imageRepo, cache, dockerfilesPath, 0); err != nil {
+				t.Fatalf("error building cached image for the first time: %v", err)
+			}
+			// Build the second image which should pull from the cache
+			if err := imageBuilder.buildCachedImages(config.imageRepo, cache, dockerfilesPath, 1); err != nil {
+				t.Fatalf("error building cached image for the second time: %v", err)
+			}
+			// Make sure both images are the same
+			kanikoVersion0 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 0)
+			kanikoVersion1 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 1)
+
+			// container-diff
+			containerdiffCmd := exec.Command("container-diff", "diff",
+				kanikoVersion0, kanikoVersion1,
+				"-q", "--type=file", "--type=metadata", "--json")
+
+			diff := RunCommand(containerdiffCmd, t)
+			t.Logf("diff = %s", diff)
+
+			expected := fmt.Sprintf(emptyContainerDiff, kanikoVersion0, kanikoVersion1, kanikoVersion0, kanikoVersion1)
+			checkContainerDiffOutput(t, diff, expected)
+		})
+	}
+}
+
+func checkContainerDiffOutput(t *testing.T, diff []byte, expected string) {
+	// Let's compare the json objects themselves instead of strings to avoid
+	// issues with spaces and indents
+	t.Helper()
+
+	var diffInt interface{}
+	var expectedInt interface{}
+
+	err := json.Unmarshal(diff, &diffInt)
+	if err != nil {
+		t.Error(err)
+	}
+
+	err = json.Unmarshal([]byte(expected), &expectedInt)
+	if err != nil {
+		t.Error(err)
+	}
+
+	testutil.CheckErrorAndDeepEqual(t, false, nil, expectedInt, diffInt)
+}
+
 func checkLayers(t *testing.T, image1, image2 string, offset int) {
 	t.Helper()
 	img1, err := getImageDetails(image1)
@@ -0,0 +1,70 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/config"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/authn/k8schain"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// RetrieveLayer checks the specified cache for a layer with the tag :cacheKey
+func RetrieveLayer(opts *config.KanikoOptions, cacheKey string) (v1.Image, error) {
+	cache, err := Destination(opts, cacheKey)
+	if err != nil {
+		return nil, errors.Wrap(err, "getting cache destination")
+	}
+	logrus.Infof("Checking for cached layer %s...", cache)
+
+	cacheRef, err := name.NewTag(cache, name.WeakValidation)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf("getting reference for %s", cache))
+	}
+	k8sc, err := k8schain.NewNoClient()
+	if err != nil {
+		return nil, err
+	}
+	kc := authn.NewMultiKeychain(authn.DefaultKeychain, k8sc)
+	img, err := remote.Image(cacheRef, remote.WithAuthFromKeychain(kc))
+	if err != nil {
+		return nil, err
+	}
+	_, err = img.Layers()
+	return img, err
+}
+
+// Destination returns the repo where the layer should be stored
+// If no cache is specified, one is inferred from the destination provided
+func Destination(opts *config.KanikoOptions, cacheKey string) (string, error) {
+	cache := opts.CacheRepo
+	if cache == "" {
+		destination := opts.Destinations[0]
+		destRef, err := name.NewTag(destination, name.WeakValidation)
+		if err != nil {
+			return "", errors.Wrap(err, "getting tag for destination")
+		}
+		return fmt.Sprintf("%s/cache:%s", destRef.Context(), cacheKey), nil
+	}
+	return fmt.Sprintf("%s:%s", cache, cacheKey), nil
+}
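For illustration (not part of this commit): the sketch below mirrors only what Destination above shows. When no --cache-repo is given, the cache repository is derived from the first destination and the cache key becomes the tag; otherwise the configured repository is used directly. The repository names and key in main are made-up examples.

package main

import "fmt"

// destination is a simplified stand-in for cache.Destination, for illustration only.
func destination(cacheRepo, destinationRepo, cacheKey string) string {
	if cacheRepo == "" {
		// No explicit cache repo: cache under "<destination repo>/cache:<cacheKey>".
		return fmt.Sprintf("%s/cache:%s", destinationRepo, cacheKey)
	}
	// Explicit cache repo: tag it directly with the cache key.
	return fmt.Sprintf("%s:%s", cacheRepo, cacheKey)
}

func main() {
	fmt.Println(destination("", "gcr.io/my-project/my-image", "abc123"))
	// gcr.io/my-project/my-image/cache:abc123
	fmt.Println(destination("gcr.io/my-project/kaniko-cache", "gcr.io/my-project/my-image", "abc123"))
	// gcr.io/my-project/kaniko-cache:abc123
}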
@@ -24,6 +24,7 @@ type KanikoOptions struct {
 	Bucket string
 	TarPath string
 	Target string
+	CacheRepo string
 	Destinations multiArg
 	BuildArgs multiArg
 	InsecurePush bool
@@ -31,4 +32,5 @@ type KanikoOptions struct {
 	SingleSnapshot bool
 	Reproducible bool
 	NoPush bool
+	Cache bool
 }
@@ -18,6 +18,7 @@ package executor
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -33,6 +34,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/cache"
 	"github.com/GoogleContainerTools/kaniko/pkg/commands"
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
@@ -84,20 +86,52 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
 }
 
 // key will return a string representation of the build at the cmd
-// TODO: priyawadhwa@ to fill this out when implementing caching
-// func (s *stageBuilder) key(cmd string) (string, error) {
-// 	return "", nil
-// }
+func (s *stageBuilder) key(cmd string) (string, error) {
+	fsKey, err := s.snapshotter.Key()
+	if err != nil {
+		return "", err
+	}
+	c := bytes.NewBuffer([]byte{})
+	enc := json.NewEncoder(c)
+	enc.Encode(s.cf)
+	cf, err := util.SHA256(c)
+	if err != nil {
+		return "", err
+	}
+	logrus.Debugf("%s\n%s\n%s\n%s\n", s.baseImageDigest, fsKey, cf, cmd)
+	return util.SHA256(bytes.NewReader([]byte(s.baseImageDigest + fsKey + cf + cmd)))
+}
 
 // extractCachedLayer will extract the cached layer and append it to the config file
-// TODO: priyawadhwa@ to fill this out when implementing caching
-// func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) error {
-// 	return nil
-// }
+func (s *stageBuilder) extractCachedLayer(layer v1.Image, createdBy string) error {
+	logrus.Infof("Found cached layer, extracting to filesystem")
+	extractedFiles, err := util.GetFSFromImage(constants.RootDir, layer)
+	if err != nil {
+		return errors.Wrap(err, "extracting fs from image")
+	}
+	if _, err := s.snapshotter.TakeSnapshot(extractedFiles); err != nil {
+		return err
+	}
+	logrus.Infof("Appending cached layer to base image")
+	l, err := layer.Layers()
+	if err != nil {
+		return errors.Wrap(err, "getting cached layer from image")
+	}
+	s.image, err = mutate.Append(s.image,
+		mutate.Addendum{
+			Layer: l[0],
+			History: v1.History{
+				Author:    constants.Author,
+				CreatedBy: createdBy,
+			},
+		},
+	)
+	return err
+}
 
 func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 	// Unpack file system to root
-	if err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
+	if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
 		return err
 	}
 	// Take initial snapshot
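For illustration (not part of this commit): the key above combines the base image digest, the snapshotter's filesystem key, the serialized image config, and the command string. The standalone sketch below shows that composition with crypto/sha256; it deliberately simplifies the hashing (kaniko hashes the config separately through util.SHA256) and every input value in main is made up.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// cacheKey is an illustrative stand-in for stageBuilder.key: same ingredients, simpler hashing.
func cacheKey(baseImageDigest, fsKey string, imageConfig interface{}, cmd string) (string, error) {
	cf, err := json.Marshal(imageConfig)
	if err != nil {
		return "", err
	}
	// Hash the concatenation of all ingredients; any change produces a different key.
	sum := sha256.Sum256([]byte(baseImageDigest + fsKey + string(cf) + cmd))
	return fmt.Sprintf("%x", sum), nil
}

func main() {
	// Hypothetical inputs: a cached layer is only reused when base image, filesystem,
	// config, and command are all identical to a previous build.
	key, _ := cacheKey("sha256:1d6a9a6d", "fs-key-0", map[string]string{"ENV": "FOO=bar"}, "RUN apt-get update")
	fmt.Println(key)
}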
@@ -115,6 +149,20 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 			continue
 		}
 		logrus.Info(command.String())
+		cacheKey, err := s.key(command.String())
+		if err != nil {
+			return errors.Wrap(err, "getting key")
+		}
+		if command.CacheCommand() && opts.Cache {
+			image, err := cache.RetrieveLayer(opts, cacheKey)
+			if err == nil {
+				if err := s.extractCachedLayer(image, command.String()); err != nil {
+					return errors.Wrap(err, "extracting cached layer")
+				}
+				continue
+			}
+			logrus.Info("No cached layer found, executing command...")
+		}
 		if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
 			return err
 		}
@@ -163,6 +211,12 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 		if err != nil {
 			return err
 		}
+		// Push layer to cache now along with new config file
+		if command.CacheCommand() && opts.Cache {
+			if err := pushLayerToCache(opts, cacheKey, layer, command.String()); err != nil {
+				return err
+			}
+		}
 		s.image, err = mutate.Append(s.image,
 			mutate.Addendum{
 				Layer: layer,
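For illustration (not part of this commit): taken together, the two hunks above plus RetrieveLayer, extractCachedLayer, and pushLayerToCache give each cacheable command a lookup-or-execute-then-publish flow. The sketch below is a toy, in-memory rendering of that flow; none of its identifiers are kaniko's real API.

package main

import "fmt"

type layer struct{ createdBy string }

var remoteCache = map[string]layer{} // stands in for the remote cache repository

func retrieveLayer(key string) (layer, bool) { l, ok := remoteCache[key]; return l, ok }

func pushLayerToCache(key string, l layer) { remoteCache[key] = l }

func buildCommand(cmd string, cacheEnabled bool) {
	key := "key-for-" + cmd // stands in for stageBuilder.key(cmd)
	if cacheEnabled {
		if l, ok := retrieveLayer(key); ok {
			fmt.Println("cache hit, extracting layer created by:", l.createdBy)
			return // skip executing the command
		}
		fmt.Println("no cached layer found, executing command:", cmd)
	}
	// Execute the command and snapshot the filesystem into a new layer...
	newLayer := layer{createdBy: cmd}
	if cacheEnabled {
		pushLayerToCache(key, newLayer) // ...then publish it for future builds
	}
}

func main() {
	buildCommand("RUN apt-get update", true) // first build: cache miss, executes and caches
	buildCommand("RUN apt-get update", true) // second build: cache hit, skips execution
}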
@@ -233,7 +287,8 @@ func extractImageToDependecyDir(index int, image v1.Image) error {
 		return err
 	}
 	logrus.Infof("trying to extract to %s", dependencyDir)
-	return util.GetFSFromImage(dependencyDir, image)
+	_, err := util.GetFSFromImage(dependencyDir, image)
+	return err
 }
 
 func saveStageAsTarball(stageIndex int, image v1.Image) error {
@@ -21,12 +21,16 @@ import (
 	"fmt"
 	"net/http"
 
+	"github.com/GoogleContainerTools/kaniko/pkg/cache"
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
+	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"github.com/GoogleContainerTools/kaniko/pkg/version"
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/authn/k8schain"
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
 	"github.com/google/go-containerregistry/pkg/v1/remote"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
 	"github.com/pkg/errors"
@@ -100,3 +104,29 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
 	}
 	return nil
 }
+
+// pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
+// if opts.Cache doesn't exist, infer the cache from the given destination
+func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, layer v1.Layer, createdBy string) error {
+	cache, err := cache.Destination(opts, cacheKey)
+	if err != nil {
+		return errors.Wrap(err, "getting cache destination")
+	}
+	logrus.Infof("Pushing layer %s to cache now", cache)
+	empty := empty.Image
+	empty, err = mutate.Append(empty,
+		mutate.Addendum{
+			Layer: layer,
+			History: v1.History{
+				Author:    constants.Author,
+				CreatedBy: createdBy,
+			},
+		},
+	)
+	if err != nil {
+		return errors.Wrap(err, "appending layer onto empty image")
+	}
+	return DoPush(empty, &config.KanikoOptions{
+		Destinations: []string{cache},
+	})
+}
@@ -46,22 +46,25 @@ var whitelist = []string{
 }
 var volumeWhitelist = []string{}
 
-func GetFSFromImage(root string, img v1.Image) error {
+// GetFSFromImage extracts the layers of img to root
+// It returns a list of all files extracted
+func GetFSFromImage(root string, img v1.Image) ([]string, error) {
 	whitelist, err := fileSystemWhitelist(constants.WhitelistPath)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	logrus.Infof("Mounted directories: %v", whitelist)
+	logrus.Debugf("Mounted directories: %v", whitelist)
 	layers, err := img.Layers()
 	if err != nil {
-		return err
+		return nil, err
 	}
+	extractedFiles := []string{}
+
 	for i, l := range layers {
 		logrus.Infof("Extracting layer %d", i)
 		r, err := l.Uncompressed()
 		if err != nil {
-			return err
+			return nil, err
 		}
 		tr := tar.NewReader(r)
 		for {
@@ -70,7 +73,7 @@ func GetFSFromImage(root string, img v1.Image) error {
 				break
 			}
 			if err != nil {
-				return err
+				return nil, err
 			}
 			path := filepath.Join(root, filepath.Clean(hdr.Name))
 			base := filepath.Base(path)
@@ -79,13 +82,13 @@ func GetFSFromImage(root string, img v1.Image) error {
 				logrus.Debugf("Whiting out %s", path)
 				name := strings.TrimPrefix(base, ".wh.")
 				if err := os.RemoveAll(filepath.Join(dir, name)); err != nil {
-					return errors.Wrapf(err, "removing whiteout %s", hdr.Name)
+					return nil, errors.Wrapf(err, "removing whiteout %s", hdr.Name)
 				}
 				continue
 			}
 			whitelisted, err := CheckWhitelist(path)
 			if err != nil {
-				return err
+				return nil, err
 			}
 			if whitelisted && !checkWhitelistRoot(root) {
 				logrus.Debugf("Not adding %s because it is whitelisted", path)
@@ -94,7 +97,7 @@ func GetFSFromImage(root string, img v1.Image) error {
 			if hdr.Typeflag == tar.TypeSymlink {
 				whitelisted, err := CheckWhitelist(hdr.Linkname)
 				if err != nil {
-					return err
+					return nil, err
 				}
 				if whitelisted {
 					logrus.Debugf("skipping symlink from %s to %s because %s is whitelisted", hdr.Linkname, path, hdr.Linkname)
@@ -102,11 +105,12 @@ func GetFSFromImage(root string, img v1.Image) error {
 				}
 			}
 			if err := extractFile(root, hdr, tr); err != nil {
-				return err
+				return nil, err
 			}
+			extractedFiles = append(extractedFiles, filepath.Join(root, filepath.Clean(hdr.Name)))
 		}
 	}
-	return nil
+	return extractedFiles, nil
 }
 
 // DeleteFilesystem deletes the extracted image file system