Merge branch 'master' into useAmd64

commit 813fbeb634

 README.md | 55

@@ -1,5 +1,7 @@
# kaniko - Build Images In Kubernetes

`NOTE: Kaniko is not an officially supported Google product`

[Build Status](https://travis-ci.org/GoogleContainerTools/kaniko) [Go Report Card](https://goreportcard.com/report/github.com/GoogleContainerTools/kaniko)

(kaniko logo image)
@@ -15,7 +17,6 @@ We'd love to hear from you! Join us on [#kaniko Kubernetes Slack](https://kuber

:mega: **Please fill out our [quick 5-question survey](https://forms.gle/HhZGEM33x4FUz9Qa6)** so that we can learn how satisfied you are with Kaniko, and what improvements we should make. Thank you! :dancers:

Kaniko is not an officially supported Google project.

_If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md)._
@@ -61,6 +62,7 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME

- [--log-format](#--log-format)
- [--log-timestamp](#--log-timestamp)
- [--no-push](#--no-push)
- [--registry-certificate](#--registry-certificate)
- [--registry-mirror](#--registry-mirror)
- [--reproducible](#--reproducible)
- [--single-snapshot](#--single-snapshot)
@@ -173,6 +175,9 @@ If you are using Azure Blob Storage for context file, you will need to pass [Azu

### Using Private Git Repository

You can use `Personal Access Tokens` for build contexts from private repositories on [GitHub](https://blog.github.com/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/).

You can either pass the token in as part of the git URL (e.g., `git://TOKEN@github.com/acme/myproject.git#refs/heads/mybranch`)
or supply it via the environment variable `GIT_USERNAME`.
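For reference, kaniko turns these settings into HTTP basic auth when it clones the repository (see the `getGitAuth` helper added later in this commit). A minimal sketch of that behaviour; the standalone program and variable names are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/transport"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
)

// gitAuthFromEnv mirrors kaniko's getGitAuth: GIT_USERNAME/GIT_PASSWORD become
// HTTP basic auth for the clone; if neither is set, the clone is anonymous.
func gitAuthFromEnv() transport.AuthMethod {
	username := os.Getenv("GIT_USERNAME")
	password := os.Getenv("GIT_PASSWORD")
	if username != "" || password != "" {
		return &http.BasicAuth{Username: username, Password: password}
	}
	return nil
}

func main() {
	// A GitHub personal access token works as the username.
	os.Setenv("GIT_USERNAME", "my-token")
	fmt.Printf("%#v\n", gitAuthFromEnv())
}
```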

### Using Standard Input

If you run kaniko with a Standard Input build context, you will need to add the docker or kubernetes `-i, --interactive` flag.
Once running, kaniko will then get the data from `STDIN` and create the build context as a compressed tar.
@@ -187,6 +192,48 @@ echo -e 'FROM alpine \nRUN echo "created from standard input"' > Dockerfile | ta

  --destination=<gcr.io/$project/$image:$tag>
```

Complete example of how to interactively run kaniko with `.tar.gz` Standard Input data, using Kubernetes command line with a temporary container and completely dockerless:

```shell
echo -e 'FROM alpine \nRUN echo "created from standard input"' > Dockerfile | tar -cf - Dockerfile | gzip -9 | kubectl run kaniko \
  --rm --stdin=true \
  --image=gcr.io/kaniko-project/executor:latest --restart=Never \
  --overrides='{
    "apiVersion": "v1",
    "spec": {
      "containers": [
        {
          "name": "kaniko",
          "image": "gcr.io/kaniko-project/executor:latest",
          "stdin": true,
          "stdinOnce": true,
          "args": [
            "--dockerfile=Dockerfile",
            "--context=tar://stdin",
            "--destination=gcr.io/my-repo/my-image" ],
          "volumeMounts": [
            {
              "name": "cabundle",
              "mountPath": "/kaniko/ssl/certs/"
            },
            {
              "name": "docker-config",
              "mountPath": "/kaniko/.docker/"
            }]
        }],
      "volumes": [
        {
          "name": "cabundle",
          "configMap": {
            "name": "cabundle"}},
        {
          "name": "docker-config",
          "configMap": {
            "name": "docker-config" }}
      ]
    }
  }'
```

### Running kaniko

There are several different ways to deploy and run kaniko:
@@ -515,6 +562,12 @@ Set this flag if you want to pull images from a plain HTTP registry. It is suppo

Set this flag if you only want to build the image, without pushing to a registry.

#### --registry-certificate

Set this flag to provide a certificate for TLS communication with a given registry.

Expected format is `my.registry.url=/path/to/the/certificate.cert`
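For illustration, this is roughly how such a certificate ends up trusted by the registry transport; the sketch is based on the cert-pool handling that this commit moves out of `pkg/executor/push.go` (visible further down in the diff), and the helper name `buildTLSConfig` is invented for the example:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// buildTLSConfig appends the PEM file given via --registry-certificate to the
// system cert pool and returns a TLS config that trusts that registry.
func buildTLSConfig(certPath string) (*tls.Config, error) {
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool() // fall back to an empty pool
	}
	pem, err := ioutil.ReadFile(certPath)
	if err != nil {
		return nil, err
	}
	pool.AppendCertsFromPEM(pem)
	return &tls.Config{RootCAs: pool}, nil
}

func main() {
	cfg, err := buildTLSConfig("/path/to/the/certificate.cert")
	if err != nil {
		log.Fatal(err)
	}
	_ = cfg // plug into an *http.Transport as TLSClientConfig
}
```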

#### --registry-mirror

Set this flag if you want to use a registry mirror instead of the default `index.docker.io`.
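What the mirror substitution amounts to, sketched with go-containerregistry types (the `Repository.Registry = newReg` pattern appears elsewhere in this diff); the mirror hostname below is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// "debian:buster" has no registry part, so it defaults to index.docker.io.
	ref, err := name.ParseReference("debian:buster")
	if err != nil {
		log.Fatal(err)
	}
	if tag, ok := ref.(name.Tag); ok {
		// Point the reference at the mirror instead (hypothetical host).
		mirror, err := name.NewRegistry("registry-mirror.example.com")
		if err != nil {
			log.Fatal(err)
		}
		tag.Repository.Registry = mirror
		ref = tag
	}
	fmt.Println(ref.Name()) // now points at the mirror host rather than index.docker.io
}
```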
@ -83,8 +83,8 @@ var RootCmd = &cobra.Command{
|
|||
if len(opts.Destinations) == 0 && opts.ImageNameDigestFile != "" {
|
||||
return errors.New("You must provide --destination if setting ImageNameDigestFile")
|
||||
}
|
||||
// Update whitelisted paths
|
||||
util.UpdateWhitelist(opts.WhitelistVarRun)
|
||||
// Update ignored paths
|
||||
util.UpdateInitialIgnoreList(opts.IgnoreVarRun)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
|
@ -160,7 +160,7 @@ func addKanikoOptionsFlags() {
|
|||
opts.RegistriesCertificates = make(map[string]string)
|
||||
RootCmd.PersistentFlags().VarP(&opts.RegistriesCertificates, "registry-certificate", "", "Use the provided certificate for TLS communication with the given registry. Expected format is 'my.registry.url=/path/to/the/server/certificate'.")
|
||||
RootCmd.PersistentFlags().StringVarP(&opts.RegistryMirror, "registry-mirror", "", "", "Registry mirror to use as pull-through cache instead of docker.io.")
|
||||
RootCmd.PersistentFlags().BoolVarP(&opts.WhitelistVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
|
||||
RootCmd.PersistentFlags().BoolVarP(&opts.IgnoreVarRun, "whitelist-var-run", "", true, "Ignore /var/run directory when taking image snapshot. Set it to false to preserve /var/run/ in destination image. (Default true).")
|
||||
RootCmd.PersistentFlags().VarP(&opts.Labels, "label", "", "Set metadata for an image. Set it repeatedly for multiple labels.")
|
||||
RootCmd.PersistentFlags().BoolVarP(&opts.SkipUnusedStages, "skip-unused-stages", "", false, "Build only used stages if set to true. Otherwise it builds by default all stages, even unnecessary ones, until it reaches the target stage / end of the Dockerfile.")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ FROM golang:1.14
|
|||
ARG GOARCH=amd64
|
||||
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
|
||||
# Get GCR credential helper
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v2.0.1/docker-credential-gcr_linux_amd64-2.0.1.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-2.0.1.tar.gz
|
||||
# Get Amazon ECR credential helper
|
||||
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
|
||||
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
|
||||
|
|
@ -39,6 +39,7 @@ COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-
|
|||
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
|
||||
COPY files/ca-certificates.crt /kaniko/ssl/certs/
|
||||
COPY --from=0 /kaniko/.docker /kaniko/.docker
|
||||
COPY files/nsswitch.conf /etc/nsswitch.conf
|
||||
ENV HOME /root
|
||||
ENV USER /root
|
||||
ENV PATH /usr/local/bin:/kaniko
|
||||
|
|
|
|||
|
|
@ -19,11 +19,11 @@ FROM golang:1.14
|
|||
ARG GOARCH=amd64
|
||||
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
|
||||
# Get GCR credential helper
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v2.0.1/docker-credential-gcr_linux_amd64-2.0.1.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-2.0.1.tar.gz
|
||||
# Get Amazon ECR credential helper
|
||||
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
|
||||
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
|
||||
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
|
||||
# ACR docker credential helper
|
||||
ADD https://aadacr.blob.core.windows.net/acr-docker-credential-helper/docker-credential-acr-linux-amd64.tar.gz /usr/local/bin
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-acr-linux-amd64.tar.gz
|
||||
|
|
@ -46,13 +46,14 @@ COPY --from=0 /usr/local/bin/docker-credential-gcr /kaniko/docker-credential-gcr
|
|||
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /kaniko/docker-credential-ecr-login
|
||||
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
|
||||
COPY --from=1 /distroless/bazel-bin/experimental/busybox/busybox/ /busybox/
|
||||
# Workaround See #https://github.com/GoogleContainerTools/kaniko/issues/656#issuecomment-564035645
|
||||
COPY --from=amd64/busybox:1.31.1 /bin/busybox /busybox/busybox
|
||||
|
||||
# Declare /busybox as a volume to get it automatically whitelisted
|
||||
# Declare /busybox as a volume to get it automatically added to the ignore list
|
||||
VOLUME /busybox
|
||||
|
||||
COPY files/ca-certificates.crt /kaniko/ssl/certs/
|
||||
COPY --from=0 /kaniko/.docker /kaniko/.docker
|
||||
COPY files/nsswitch.conf /etc/nsswitch.conf
|
||||
ENV HOME /root
|
||||
ENV USER /root
|
||||
ENV PATH /usr/local/bin:/kaniko:/busybox
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ FROM golang:1.14
|
|||
ARG GOARCH=amd64
|
||||
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
|
||||
# Get GCR credential helper
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.5.0/docker-credential-gcr_linux_amd64-1.5.0.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.5.0.tar.gz
|
||||
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v2.0.1/docker-credential-gcr_linux_amd64-2.0.1.tar.gz /usr/local/bin/
|
||||
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-2.0.1.tar.gz
|
||||
# Get Amazon ECR credential helper
|
||||
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
|
||||
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
|
||||
|
|
@ -39,6 +39,7 @@ COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-
|
|||
COPY --from=0 /usr/local/bin/docker-credential-acr-linux /kaniko/docker-credential-acr
|
||||
COPY files/ca-certificates.crt /kaniko/ssl/certs/
|
||||
COPY --from=0 /kaniko/.docker /kaniko/.docker
|
||||
COPY files/nsswitch.conf /etc/nsswitch.conf
|
||||
ENV HOME /root
|
||||
ENV USER /root
|
||||
ENV PATH /usr/local/bin:/kaniko
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ To accomplish this, Kaniko walks the entire filesystem to discover every object.
|
|||
Some of these objects may actually be a symlink to another object in the
|
||||
filesystem; in these cases we must consider both the link and the target object.
|
||||
|
||||
Kaniko also maintains a set of whitelisted (aka ignored) filepaths. Any object
|
||||
Kaniko also maintains a set of ignored filepaths. Any object
|
||||
which matches one of these filepaths should be ignored by kaniko.
|
||||
|
||||
This results in a 3 dimensional search space
|
||||
|
|
|
|||
|
|
@ -0,0 +1,25 @@
|
|||
# /etc/nsswitch.conf
|
||||
#
|
||||
# As described on the web page https://man7.org/linux/man-pages/man3/gethostbyname.3.html,
|
||||
# without the nsswitch.conf file, the gethostbyname() and gethostbyaddr() domain queries
|
||||
# will fall back to a name server, so the /etc/hosts file takes no effect.
|
||||
#
|
||||
# For example, when hostaliases are specified for a kubernetes pod, without proper settings
|
||||
# defined in this file, the hostaliases settings will not take effect.
|
||||
#
|
||||
# The following contents of this file are from the ubuntu:16.04 docker image.
|
||||
|
||||
passwd: compat
|
||||
group: compat
|
||||
shadow: compat
|
||||
gshadow: files
|
||||
|
||||
hosts: files dns
|
||||
networks: files
|
||||
|
||||
protocols: db files
|
||||
services: db files
|
||||
ethers: db files
|
||||
rpc: db files
|
||||
|
||||
netgroup: nis
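With `hosts: files dns` in place, name lookups consult `/etc/hosts` (and therefore Kubernetes hostAliases entries) before DNS. A tiny standard-library check; the hostname below is a hypothetical hostAliases entry:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// With nsswitch.conf present, names listed in /etc/hosts (for example
	// entries injected via Kubernetes hostAliases) resolve locally before
	// any DNS query is attempted.
	addrs, err := net.LookupHost("my-alias.internal")
	fmt.Println(addrs, err)
}
```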
|
||||
|
|
@ -14,7 +14,7 @@ ADD $contextenv/* /tmp/${contextenv}/
|
|||
ADD context/tars/fil* /tars/
|
||||
ADD context/tars/file.tar /tars_again
|
||||
|
||||
# This tar has some directories that should be whitelisted inside it.
|
||||
# This tar has some directories that should be ignored inside it.
|
||||
|
||||
ADD context/tars/sys.tar.gz /
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,11 @@
|
|||
# This dockerfile makes sure the .dockerignore is working
|
||||
# If so then ignore/foo should copy to /foo
|
||||
# If not, then this image won't build because it will attempt to copy three files to /foo, which is a file not a directory
|
||||
FROM scratch
|
||||
FROM scratch as base
|
||||
COPY ignore/* /foo
|
||||
|
||||
From base as first
|
||||
COPY --from=base /foo ignore/bar
|
||||
|
||||
FROM first
|
||||
COPY --from=first ignore/* /fooAnother/
|
||||
|
|
@ -13,7 +13,7 @@ FROM base as fourth
|
|||
RUN date > /date
|
||||
ENV foo bar
|
||||
|
||||
# This base image contains symlinks with relative paths to whitelisted directories
|
||||
# This base image contains symlinks with relative paths to ignored directories
|
||||
# We need to test they're extracted correctly
|
||||
FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5aa48
|
||||
|
||||
|
|
|
|||
|
|
@ -556,7 +556,7 @@ func checkContainerDiffOutput(t *testing.T, diff []byte, expected string) {
|
|||
t.Error(err)
|
||||
}
|
||||
|
||||
// Some differences (whitelisted paths, etc.) are known and expected.
|
||||
// Some differences (ignored paths, etc.) are known and expected.
|
||||
fdr := diffInt[0].Diff.(*fileDiffResult)
|
||||
fdr.Adds = filterFileDiff(fdr.Adds)
|
||||
fdr.Dels = filterFileDiff(fdr.Dels)
|
||||
|
|
@ -588,14 +588,14 @@ func filterMetaDiff(metaDiff []string) []string {
|
|||
func filterFileDiff(f []fileDiff) []fileDiff {
|
||||
var newDiffs []fileDiff
|
||||
for _, diff := range f {
|
||||
isWhitelisted := false
|
||||
isIgnored := false
|
||||
for _, p := range allowedDiffPaths {
|
||||
if util.HasFilepathPrefix(diff.Name, p, false) {
|
||||
isWhitelisted = true
|
||||
isIgnored = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isWhitelisted {
|
||||
if !isIgnored {
|
||||
newDiffs = append(newDiffs, diff)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,12 +23,17 @@ import (
|
|||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
git "gopkg.in/src-d/go-git.v4"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/transport"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
|
||||
)
|
||||
|
||||
const (
|
||||
gitPullMethodEnvKey = "GIT_PULL_METHOD"
|
||||
gitPullMethodHTTPS = "https"
|
||||
gitPullMethodHTTP = "http"
|
||||
|
||||
gitAuthUsernameEnvKey = "GIT_USERNAME"
|
||||
gitAuthPasswordEnvKey = "GIT_PASSWORD"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -46,6 +51,7 @@ func (g *Git) UnpackTarFromBuildContext() (string, error) {
|
|||
parts := strings.Split(g.context, "#")
|
||||
options := git.CloneOptions{
|
||||
URL: getGitPullMethod() + "://" + parts[0],
|
||||
Auth: getGitAuth(),
|
||||
Progress: os.Stdout,
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
|
|
@ -55,6 +61,18 @@ func (g *Git) UnpackTarFromBuildContext() (string, error) {
|
|||
return directory, err
|
||||
}
|
||||
|
||||
func getGitAuth() transport.AuthMethod {
|
||||
username := os.Getenv(gitAuthUsernameEnvKey)
|
||||
password := os.Getenv(gitAuthPasswordEnvKey)
|
||||
if username != "" || password != "" {
|
||||
return &http.BasicAuth{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getGitPullMethod() string {
|
||||
gitPullMethod := os.Getenv(gitPullMethodEnvKey)
|
||||
if ok := supportedGitPullMethods[gitPullMethod]; !ok {
|
||||
|
|
|
|||
|
|
@ -21,6 +21,8 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/transport"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
|
||||
)
|
||||
|
||||
func TestGetGitPullMethod(t *testing.T) {
|
||||
|
|
@ -80,3 +82,88 @@ func TestGetGitPullMethod(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetGitAuth(t *testing.T) {
|
||||
tests := []struct {
|
||||
testName string
|
||||
setEnv func() (expectedValue transport.AuthMethod)
|
||||
}{
|
||||
{
|
||||
testName: "noEnv",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
expectedValue = nil
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "emptyUsernameEnv",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
_ = os.Setenv(gitAuthUsernameEnvKey, "")
|
||||
expectedValue = nil
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "emptyPasswordEnv",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
_ = os.Setenv(gitAuthPasswordEnvKey, "")
|
||||
expectedValue = nil
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "emptyEnv",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
_ = os.Setenv(gitAuthUsernameEnvKey, "")
|
||||
_ = os.Setenv(gitAuthPasswordEnvKey, "")
|
||||
expectedValue = nil
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "withUsername",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
username := "foo"
|
||||
_ = os.Setenv(gitAuthUsernameEnvKey, username)
|
||||
expectedValue = &http.BasicAuth{Username: username}
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "withPassword",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
pass := "super-secret-password-1234"
|
||||
_ = os.Setenv(gitAuthPasswordEnvKey, pass)
|
||||
expectedValue = &http.BasicAuth{Password: pass}
|
||||
return
|
||||
},
|
||||
},
|
||||
{
|
||||
testName: "withUsernamePassword",
|
||||
setEnv: func() (expectedValue transport.AuthMethod) {
|
||||
username := "foo"
|
||||
pass := "super-secret-password-1234"
|
||||
_ = os.Setenv(gitAuthUsernameEnvKey, username)
|
||||
_ = os.Setenv(gitAuthPasswordEnvKey, pass)
|
||||
expectedValue = &http.BasicAuth{Username: username, Password: pass}
|
||||
return
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.testName, func(t *testing.T) {
|
||||
// Make sure to unset environment vars to get a clean test each time
|
||||
defer clearTestAuthEnv()
|
||||
|
||||
expectedValue := tt.setEnv()
|
||||
testutil.CheckDeepEqual(t, expectedValue, getGitAuth())
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func clearTestAuthEnv() {
|
||||
_ = os.Unsetenv(gitAuthUsernameEnvKey)
|
||||
_ = os.Unsetenv(gitAuthPasswordEnvKey)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,9 +17,7 @@ limitations under the License.
|
|||
package cache
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
|
@ -27,6 +25,7 @@ import (
|
|||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/creds"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
|
@ -67,12 +66,7 @@ func (rc *RegistryCache) RetrieveLayer(ck string) (v1.Image, error) {
|
|||
cacheRef.Repository.Registry = newReg
|
||||
}
|
||||
|
||||
tr := http.DefaultTransport.(*http.Transport)
|
||||
if rc.Opts.SkipTLSVerifyRegistries.Contains(registryName) {
|
||||
tr.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
tr := util.MakeTransport(rc.Opts, registryName)
|
||||
|
||||
img, err := remote.Image(cacheRef, remote.WithTransport(tr), remote.WithAuthFromKeychain(creds.GetKeychain()))
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -65,7 +65,8 @@ func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bui
|
|||
logrus.Infof("args: %s", newCommand[1:])
|
||||
|
||||
cmd := exec.Command(newCommand[0], newCommand[1:]...)
|
||||
cmd.Dir = config.WorkingDir
|
||||
|
||||
cmd.Dir = setWorkDirIfExists(config.WorkingDir)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
||||
|
|
@ -236,3 +237,10 @@ func (cr *CachingRunCommand) String() string {
|
|||
func (cr *CachingRunCommand) MetadataOnly() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func setWorkDirIfExists(workdir string) string {
|
||||
if _, err := os.Lstat(workdir); err == nil {
|
||||
return workdir
|
||||
}
|
||||
return ""
|
||||
}
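The empty-string fallback works because `os/exec` runs a command in the calling process's current directory when `Cmd.Dir` is empty, so a `WORKDIR` that does not exist on disk no longer breaks `RUN`. A minimal illustration:

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("pwd")
	cmd.Dir = "" // empty Dir: run in the current directory instead of failing on a missing one
	out, err := cmd.Output()
	fmt.Println(string(out), err)
}
```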
|
||||
|
|
|
|||
|
|
@ -316,3 +316,12 @@ func Test_CachingRunCommand_ExecuteCommand(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetWorkDirIfExists(t *testing.T) {
|
||||
testDir, err := ioutil.TempDir("", "workdir")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
testutil.CheckDeepEqual(t, testDir, setWorkDirIfExists(testDir))
|
||||
testutil.CheckDeepEqual(t, "", setWorkDirIfExists("doesnot-exists"))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.
|
|||
for _, volume := range resolvedVolumes {
|
||||
var x struct{}
|
||||
existingVolumes[volume] = x
|
||||
util.AddVolumePathToWhitelist(volume)
|
||||
util.AddVolumePathToIgnoreList(volume)
|
||||
|
||||
// Only create and snapshot the dir if it didn't exist already
|
||||
if _, err := os.Stat(volume); os.IsNotExist(err) {
|
||||
|
|
|
|||
|
|
@ -22,10 +22,10 @@ import (
|
|||
|
||||
var RootDir string
|
||||
var KanikoDir string
|
||||
var WhitelistPath string
|
||||
var IgnoreListPath string
|
||||
|
||||
func init() {
|
||||
RootDir = constants.RootDir
|
||||
KanikoDir = constants.KanikoDir
|
||||
WhitelistPath = constants.WhitelistPath
|
||||
IgnoreListPath = constants.IgnoreListPath
|
||||
}
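Because these are package-level variables rather than constants, tests can point them at temporary files, as the multistage test setup further down in this diff does. A condensed sketch of that pattern; the test name is hypothetical:

```go
package config_test

import (
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/GoogleContainerTools/kaniko/pkg/config"
	"github.com/GoogleContainerTools/kaniko/pkg/constants"
)

func TestWithFakeMountInfo(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "mountinfo")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// One line in the /proc/self/mountinfo format; field 5 is the mount point.
	mountInfo := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	mFile := filepath.Join(tmpDir, "mountinfo")
	if err := ioutil.WriteFile(mFile, []byte(mountInfo), 0644); err != nil {
		t.Fatal(err)
	}

	// Swap the package-level path, restore it when the test ends.
	config.IgnoreListPath = mFile
	defer func() { config.IgnoreListPath = constants.IgnoreListPath }()

	// ... exercise code that calls util.DetectFilesystemIgnoreList(config.IgnoreListPath) ...
}
```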
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ type KanikoOptions struct {
|
|||
NoPush bool
|
||||
Cache bool
|
||||
Cleanup bool
|
||||
WhitelistVarRun bool
|
||||
IgnoreVarRun bool
|
||||
SkipUnusedStages bool
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ const (
|
|||
//KanikoDir is the path to the Kaniko directory
|
||||
KanikoDir = "/kaniko"
|
||||
|
||||
WhitelistPath = "/proc/self/mountinfo"
|
||||
IgnoreListPath = "/proc/self/mountinfo"
|
||||
|
||||
Author = "kaniko"
|
||||
|
||||
|
|
|
|||
|
|
@ -565,7 +565,7 @@ func Test_SkipingUnusedStages(t *testing.T) {
|
|||
# Make sure that we snapshot intermediate images correctly
|
||||
RUN date > /date
|
||||
ENV foo bar
|
||||
# This base image contains symlinks with relative paths to whitelisted directories
|
||||
# This base image contains symlinks with relative paths to ignored directories
|
||||
# We need to test they're extracted correctly
|
||||
FROM fedora@sha256:c4cc32b09c6ae3f1353e7e33a8dda93dc41676b923d6d89afa996b421cc5aa48
|
||||
FROM fourth
|
||||
|
|
|
|||
|
|
@ -44,6 +44,7 @@ import (
|
|||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
|
||||
image_util "github.com/GoogleContainerTools/kaniko/pkg/image"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
|
|
@ -84,7 +85,7 @@ type stageBuilder struct {
|
|||
|
||||
// newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
|
||||
func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string, dcm map[string]string, sid map[string]string, stageNameToIdx map[string]string) (*stageBuilder, error) {
|
||||
sourceImage, err := util.RetrieveSourceImage(stage, opts)
|
||||
sourceImage, err := image_util.RetrieveSourceImage(stage, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -313,8 +314,8 @@ func (s *stageBuilder) build() error {
|
|||
logrus.Info("Skipping unpacking as no commands require it.")
|
||||
}
|
||||
|
||||
if err := util.DetectFilesystemWhitelist(config.WhitelistPath); err != nil {
|
||||
return errors.Wrap(err, "failed to check filesystem whitelist")
|
||||
if err := util.DetectFilesystemIgnoreList(config.IgnoreListPath); err != nil {
|
||||
return errors.Wrap(err, "failed to check filesystem mount paths")
|
||||
}
|
||||
|
||||
initSnapshotTaken := false
|
||||
|
|
@ -517,7 +518,7 @@ func CalculateDependencies(stages []config.KanikoStage, opts *config.KanikoOptio
|
|||
} else if s.Name == constants.NoBaseImage {
|
||||
image = empty.Image
|
||||
} else {
|
||||
image, err = util.RetrieveSourceImage(s, opts)
|
||||
image, err = image_util.RetrieveSourceImage(s, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -596,6 +597,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
|
|||
}
|
||||
logrus.Infof("Built cross stage deps: %v", crossStageDependencies)
|
||||
|
||||
util.IsFirstStage = true
|
||||
for index, stage := range kanikoStages {
|
||||
sb, err := newStageBuilder(opts, stage, crossStageDependencies, digestToCacheKey, stageIdxToDigest, stageNameToIdx)
|
||||
if err != nil {
|
||||
|
|
@ -604,6 +606,7 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
|
|||
if err := sb.build(); err != nil {
|
||||
return nil, errors.Wrap(err, "error building stage")
|
||||
}
|
||||
util.IsFirstStage = false
|
||||
|
||||
reviewConfig(stage, &sb.cf.Config)
|
||||
|
||||
|
|
@ -749,7 +752,7 @@ func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) e
|
|||
|
||||
// This must be an image name, fetch it.
|
||||
logrus.Debugf("Found extra base image stage %s", c.From)
|
||||
sourceImage, err := util.RetrieveRemoteImage(c.From, opts)
|
||||
sourceImage, err := image_util.RetrieveRemoteImage(c.From, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ func (s *CompositeCache) AddPath(p, context string) error {
|
|||
}
|
||||
|
||||
// Only add the hash of this directory to the key
|
||||
// if there is any whitelisted content.
|
||||
// if there is any ignored content.
|
||||
if !empty || !util.ExcludeFile(p, context) {
|
||||
s.keys = append(s.keys, k)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -166,7 +166,7 @@ func setupMultistageTests(t *testing.T) (string, func()) {
|
|||
// set up config
|
||||
config.RootDir = testDir
|
||||
config.KanikoDir = fmt.Sprintf("%s/%s", testDir, "kaniko")
|
||||
// Write a whitelist path
|
||||
// Write path to ignore list
|
||||
if err := os.MkdirAll(filepath.Join(testDir, "proc"), 0755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
@ -178,10 +178,10 @@ func setupMultistageTests(t *testing.T) (string, func()) {
|
|||
if err := ioutil.WriteFile(mFile, []byte(mountInfo), 0644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
config.WhitelistPath = mFile
|
||||
config.IgnoreListPath = mFile
|
||||
return testDir, func() {
|
||||
config.KanikoDir = constants.KanikoDir
|
||||
config.RootDir = constants.RootDir
|
||||
config.WhitelistPath = constants.WhitelistPath
|
||||
config.IgnoreListPath = constants.IgnoreListPath
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,8 +17,6 @@ limitations under the License.
|
|||
package executor
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
|
@ -34,6 +32,7 @@ import (
|
|||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/creds"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/version"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
|
@ -41,6 +40,7 @@ import (
|
|||
"github.com/google/go-containerregistry/pkg/v1/layout"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
|
@ -51,6 +51,11 @@ type withUserAgent struct {
|
|||
t http.RoundTripper
|
||||
}
|
||||
|
||||
// for testing
|
||||
var (
|
||||
newRetry = transport.NewRetry
|
||||
)
|
||||
|
||||
const (
|
||||
UpstreamClientUaKey = "UPSTREAM_CLIENT_TYPE"
|
||||
)
|
||||
|
|
@ -76,41 +81,6 @@ func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) {
|
|||
return w.t.RoundTrip(r)
|
||||
}
|
||||
|
||||
type CertPool interface {
|
||||
value() *x509.CertPool
|
||||
append(path string) error
|
||||
}
|
||||
|
||||
type X509CertPool struct {
|
||||
inner x509.CertPool
|
||||
}
|
||||
|
||||
func (p *X509CertPool) value() *x509.CertPool {
|
||||
return &p.inner
|
||||
}
|
||||
|
||||
func (p *X509CertPool) append(path string) error {
|
||||
pem, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.inner.AppendCertsFromPEM(pem)
|
||||
return nil
|
||||
}
|
||||
|
||||
type systemCertLoader func() CertPool
|
||||
|
||||
var defaultX509Handler systemCertLoader = func() CertPool {
|
||||
systemCertPool, err := x509.SystemCertPool()
|
||||
if err != nil {
|
||||
logrus.Warn("Failed to load system cert pool. Loading empty one instead.")
|
||||
systemCertPool = x509.NewCertPool()
|
||||
}
|
||||
return &X509CertPool{
|
||||
inner: *systemCertPool,
|
||||
}
|
||||
}
|
||||
|
||||
// for testing
|
||||
var (
|
||||
fs = afero.NewOsFs()
|
||||
|
|
@ -155,7 +125,7 @@ func CheckPushPermissions(opts *config.KanikoOptions) error {
|
|||
}
|
||||
destRef.Repository.Registry = newReg
|
||||
}
|
||||
tr := makeTransport(opts, registryName, defaultX509Handler)
|
||||
tr := newRetry(util.MakeTransport(opts, registryName))
|
||||
if err := checkRemotePushPermission(destRef, creds.GetKeychain(), tr); err != nil {
|
||||
return errors.Wrapf(err, "checking push permission for %q", destRef)
|
||||
}
|
||||
|
|
@ -252,7 +222,7 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
|
|||
return errors.Wrap(err, "resolving pushAuth")
|
||||
}
|
||||
|
||||
tr := makeTransport(opts, registryName, defaultX509Handler)
|
||||
tr := newRetry(util.MakeTransport(opts, registryName))
|
||||
rt := &withUserAgent{t: tr}
|
||||
|
||||
if err := remote.Write(destRef, image, remote.WithAuth(pushAuth), remote.WithTransport(rt)); err != nil {
|
||||
|
|
@ -294,26 +264,6 @@ func writeImageOutputs(image v1.Image, destRefs []name.Tag) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func makeTransport(opts *config.KanikoOptions, registryName string, loader systemCertLoader) http.RoundTripper {
|
||||
// Create a transport to set our user-agent.
|
||||
var tr http.RoundTripper = http.DefaultTransport.(*http.Transport).Clone()
|
||||
if opts.SkipTLSVerify || opts.SkipTLSVerifyRegistries.Contains(registryName) {
|
||||
tr.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
} else if certificatePath := opts.RegistriesCertificates[registryName]; certificatePath != "" {
|
||||
systemCertPool := loader()
|
||||
if err := systemCertPool.append(certificatePath); err != nil {
|
||||
logrus.WithError(err).Warnf("Failed to load certificate %s for %s\n", certificatePath, registryName)
|
||||
} else {
|
||||
tr.(*http.Transport).TLSClientConfig = &tls.Config{
|
||||
RootCAs: systemCertPool.value(),
|
||||
}
|
||||
}
|
||||
}
|
||||
return tr
|
||||
}
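The logic above is consolidated into a shared `util.MakeTransport(opts, registryName)` that both push and pull paths now call (wrapped in `transport.NewRetry` here). Its exact body is not shown in this diff; a plausible sketch based on the code it replaces:

```go
package util

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"net/http"

	"github.com/GoogleContainerTools/kaniko/pkg/config"
)

// MakeTransport returns a registry-specific RoundTripper: TLS verification is
// skipped when the registry is listed via --skip-tls-verify, and an extra
// certificate from --registry-certificate is trusted when provided.
// (Sketch only; the real implementation in pkg/util may differ.)
func MakeTransport(opts *config.KanikoOptions, registryName string) http.RoundTripper {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	if opts.SkipTLSVerify || opts.SkipTLSVerifyRegistries.Contains(registryName) {
		tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	} else if certPath := opts.RegistriesCertificates[registryName]; certPath != "" {
		pool, err := x509.SystemCertPool()
		if err != nil {
			pool = x509.NewCertPool()
		}
		if pem, err := ioutil.ReadFile(certPath); err == nil {
			pool.AppendCertsFromPEM(pem)
			tr.TLSClientConfig = &tls.Config{RootCAs: pool}
		}
	}
	return tr
}
```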
|
||||
|
||||
// pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
|
||||
// if opts.Cache doesn't exist, infer the cache from the given destination
|
||||
func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, tarPath string, createdBy string) error {
|
||||
|
|
|
|||
|
|
@ -18,8 +18,6 @@ package executor
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
|
@ -35,6 +33,7 @@ import (
|
|||
"github.com/google/go-containerregistry/pkg/v1/layout"
|
||||
"github.com/google/go-containerregistry/pkg/v1/random"
|
||||
"github.com/google/go-containerregistry/pkg/v1/validate"
|
||||
|
||||
"github.com/spf13/afero"
|
||||
)
|
||||
|
||||
|
|
@ -270,88 +269,6 @@ func TestImageNameDigestFile(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
type mockedCertPool struct {
|
||||
certificatesPath []string
|
||||
}
|
||||
|
||||
func (m *mockedCertPool) value() *x509.CertPool {
|
||||
return &x509.CertPool{}
|
||||
}
|
||||
|
||||
func (m *mockedCertPool) append(path string) error {
|
||||
m.certificatesPath = append(m.certificatesPath, path)
|
||||
return nil
|
||||
}
|
||||
|
||||
func Test_makeTransport(t *testing.T) {
|
||||
registryName := "my.registry.name"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
opts *config.KanikoOptions
|
||||
check func(*tls.Config, *mockedCertPool)
|
||||
}{
|
||||
{
|
||||
name: "SkipTLSVerify set",
|
||||
opts: &config.KanikoOptions{SkipTLSVerify: true},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if !config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerify set")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SkipTLSVerifyRegistries set with expected registry",
|
||||
opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{registryName}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if !config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerifyRegistries set with registry name")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SkipTLSVerifyRegistries set with other registry",
|
||||
opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{fmt.Sprintf("other.%s", registryName)}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if config.InsecureSkipVerify {
|
||||
t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify set while SkipTLSVerifyRegistries not set with registry name")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RegistriesCertificates set for registry",
|
||||
opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{registryName: "/path/to/the/certificate.cert"}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if len(pool.certificatesPath) != 1 || pool.certificatesPath[0] != "/path/to/the/certificate.cert" {
|
||||
t.Errorf("makeTransport().RegistriesCertificates certificate not appended to system certificates")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "RegistriesCertificates set for another registry",
|
||||
opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{fmt.Sprintf("other.%s=", registryName): "/path/to/the/certificate.cert"}},
|
||||
check: func(config *tls.Config, pool *mockedCertPool) {
|
||||
if len(pool.certificatesPath) != 0 {
|
||||
t.Errorf("makeTransport().RegistriesCertificates certificate appended to system certificates while added for other registry")
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var certificatesPath []string
|
||||
certPool := mockedCertPool{
|
||||
certificatesPath: certificatesPath,
|
||||
}
|
||||
var mockedSystemCertLoader systemCertLoader = func() CertPool {
|
||||
return &certPool
|
||||
}
|
||||
transport := makeTransport(tt.opts, registryName, mockedSystemCertLoader)
|
||||
tt.check(transport.(*http.Transport).TLSClientConfig, &certPool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
var calledExecCommand = false
|
||||
var calledCheckPushPermission = false
|
||||
|
||||
|
|
|
|||
|
|
@ -26,24 +26,24 @@ import (
|
|||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ResolvePaths takes a slice of file paths and a slice of whitelist entries. It resolve each
|
||||
// ResolvePaths takes a slice of file paths and a list of skipped file paths. It resolves each
|
||||
// file path according to a set of rules and then returns a slice of resolved paths or error.
|
||||
// File paths are resolved according to the following rules:
|
||||
// * If path is whitelisted, skip it.
|
||||
// * If path is in ignorelist, skip it.
|
||||
// * If path is a symlink, resolve its ancestor link and add it to the output set.
|
||||
// * If path is a symlink, resolve it's target. If the target is not whitelisted add it to the
|
||||
// * If path is a symlink, resolve its target. If the target is not ignored, add it to the
|
||||
// output set.
|
||||
// * Add all ancestors of each path to the output set.
|
||||
func ResolvePaths(paths []string, wl []util.WhitelistEntry) (pathsToAdd []string, err error) {
|
||||
func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) {
|
||||
logrus.Infof("Resolving %d paths", len(paths))
|
||||
logrus.Tracef("Resolving paths %s", paths)
|
||||
|
||||
fileSet := make(map[string]bool)
|
||||
|
||||
for _, f := range paths {
|
||||
// If the given path is part of the whitelist ignore it
|
||||
if util.IsInProvidedWhitelist(f, wl) {
|
||||
logrus.Debugf("path %s is whitelisted, ignoring it", f)
|
||||
// If the given path is part of the ignorelist ignore it
|
||||
if util.IsInProvidedIgnoreList(f, wl) {
|
||||
logrus.Debugf("path %s is in list to ignore, ignoring it", f)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -76,10 +76,10 @@ func ResolvePaths(paths []string, wl []util.WhitelistEntry) (pathsToAdd []string
|
|||
continue
|
||||
}
|
||||
|
||||
// If the given path is a symlink and the target is part of the whitelist
|
||||
// If the given path is a symlink and the target is part of the ignorelist
|
||||
// ignore the target
|
||||
if util.IsInProvidedWhitelist(evaled, wl) {
|
||||
logrus.Debugf("path %s is whitelisted, ignoring it", evaled)
|
||||
if util.IsInProvidedIgnoreList(evaled, wl) {
|
||||
logrus.Debugf("path %s is ignored, ignoring it", evaled)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -85,8 +85,8 @@ func Test_ResolvePaths(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
t.Run("none are whitelisted", func(t *testing.T) {
|
||||
wl := []util.WhitelistEntry{}
|
||||
t.Run("none are ignored", func(t *testing.T) {
|
||||
wl := []util.IgnoreListEntry{}
|
||||
|
||||
inputFiles := []string{}
|
||||
expectedFiles := []string{}
|
||||
|
|
@ -107,8 +107,8 @@ func Test_ResolvePaths(t *testing.T) {
|
|||
validateResults(t, files, expectedFiles, err)
|
||||
})
|
||||
|
||||
t.Run("some are whitelisted", func(t *testing.T) {
|
||||
wl := []util.WhitelistEntry{
|
||||
t.Run("some are ignored", func(t *testing.T) {
|
||||
wl := []util.IgnoreListEntry{
|
||||
{
|
||||
Path: filepath.Join(dir, "link", "baz"),
|
||||
},
|
||||
|
|
@ -124,7 +124,7 @@ func Test_ResolvePaths(t *testing.T) {
|
|||
link := filepath.Join(dir, "link", f)
|
||||
inputFiles = append(inputFiles, link)
|
||||
|
||||
if util.IsInProvidedWhitelist(link, wl) {
|
||||
if util.IsInProvidedIgnoreList(link, wl) {
|
||||
t.Logf("skipping %s", link)
|
||||
continue
|
||||
}
|
||||
|
|
@ -133,7 +133,7 @@ func Test_ResolvePaths(t *testing.T) {
|
|||
|
||||
target := filepath.Join(dir, "target", f)
|
||||
|
||||
if util.IsInProvidedWhitelist(target, wl) {
|
||||
if util.IsInProvidedIgnoreList(target, wl) {
|
||||
t.Logf("skipping %s", target)
|
||||
continue
|
||||
}
|
||||
|
|
@ -177,7 +177,7 @@ func Test_ResolvePaths(t *testing.T) {
|
|||
inputFiles := []string{}
|
||||
expectedFiles := []string{}
|
||||
|
||||
wl := []util.WhitelistEntry{}
|
||||
wl := []util.IgnoreListEntry{}
|
||||
|
||||
files, err := ResolvePaths(inputFiles, wl)
|
||||
|
||||
|
|
|
|||
|
|
@ -1,43 +0,0 @@
|
|||
/*
|
||||
Copyright 2018 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package image
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// SetEnvVariables sets environment variables as specified in the image
|
||||
func SetEnvVariables(img v1.Image) error {
|
||||
cfg, err := img.ConfigFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
envVars := cfg.Config.Env
|
||||
for _, envVar := range envVars {
|
||||
split := strings.SplitN(envVar, "=", 2)
|
||||
if err := os.Setenv(split[0], split[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Setting environment variable %s", envVar)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -14,29 +14,28 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
package image
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/cache"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/creds"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/timing"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/util"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/cache"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/constants"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -55,7 +54,7 @@ func RetrieveSourceImage(stage config.KanikoStage, opts *config.KanikoOptions) (
|
|||
buildArgs = append(buildArgs, fmt.Sprintf("%s=%s", arg.Key, arg.ValueString()))
|
||||
}
|
||||
buildArgs = append(buildArgs, opts.BuildArgs...)
|
||||
currentBaseName, err := ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false)
|
||||
currentBaseName, err := util.ResolveEnvironmentReplacement(stage.BaseName, buildArgs, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -146,12 +145,7 @@ func remoteImage(image string, opts *config.KanikoOptions) (v1.Image, error) {
|
|||
}
|
||||
|
||||
func remoteOptions(registryName string, opts *config.KanikoOptions) []remote.Option {
|
||||
tr := http.DefaultTransport.(*http.Transport)
|
||||
if opts.SkipTLSVerifyPull || opts.SkipTLSVerifyRegistries.Contains(registryName) {
|
||||
tr.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
}
|
||||
tr := util.MakeTransport(opts, registryName)
|
||||
|
||||
// on which v1.Platform is this currently running?
|
||||
platform := currentPlatform()
|
||||
|
|
@ -182,3 +176,11 @@ func cachedImage(opts *config.KanikoOptions, image string) (v1.Image, error) {
|
|||
}
|
||||
return cache.LocalSource(&opts.CacheOptions, cacheKey)
|
||||
}
|
||||
|
||||
// CurrentPlatform returns the v1.Platform on which the code runs
|
||||
func currentPlatform() v1.Platform {
|
||||
return v1.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
}
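One way this platform value is typically used is to select the matching variant of a multi-arch image when pulling; a sketch with go-containerregistry's remote options (the image reference is hypothetical):

```go
package main

import (
	"log"
	"runtime"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical image reference, for illustration only.
	ref, err := name.ParseReference("gcr.io/my-project/my-image:latest")
	if err != nil {
		log.Fatal(err)
	}
	// Ask the registry for the variant matching the platform this process
	// runs on, which is what feeding currentPlatform() into the remote
	// options achieves.
	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{
		OS:           runtime.GOOS,
		Architecture: runtime.GOARCH,
	}))
	if err != nil {
		log.Fatal(err)
	}
	_ = img
}
```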
|
||||
|
|
@ -14,18 +14,19 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
package image
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/instructions"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/parser"
|
||||
|
||||
"github.com/GoogleContainerTools/kaniko/pkg/config"
|
||||
"github.com/GoogleContainerTools/kaniko/testutil"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -39,14 +39,14 @@ var snapshotPathPrefix = config.KanikoDir
|
|||
|
||||
// Snapshotter holds the root directory from which to take snapshots, and a list of snapshots taken
|
||||
type Snapshotter struct {
|
||||
l *LayeredMap
|
||||
directory string
|
||||
whitelist []util.WhitelistEntry
|
||||
l *LayeredMap
|
||||
directory string
|
||||
ignorelist []util.IgnoreListEntry
|
||||
}
|
||||
|
||||
// NewSnapshotter creates a new snapshotter rooted at d
|
||||
func NewSnapshotter(l *LayeredMap, d string) *Snapshotter {
|
||||
return &Snapshotter{l: l, directory: d, whitelist: util.Whitelist()}
|
||||
return &Snapshotter{l: l, directory: d, ignorelist: util.IgnoreList()}
|
||||
}
|
||||
|
||||
// Init initializes a new snapshotter
|
||||
|
|
@ -60,7 +60,7 @@ func (s *Snapshotter) Key() (string, error) {
|
|||
return s.l.Key()
|
||||
}
|
||||
|
||||
// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
|
||||
// TakeSnapshot takes a snapshot of the specified files, avoiding directories in the ignorelist, and creates
|
||||
// a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
|
||||
func (s *Snapshotter) TakeSnapshot(files []string) (string, error) {
|
||||
f, err := ioutil.TempFile(config.KanikoDir, "")
|
||||
|
|
@ -75,7 +75,7 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) {
|
|||
return "", nil
|
||||
}
|
||||
|
||||
filesToAdd, err := filesystem.ResolvePaths(files, s.whitelist)
|
||||
filesToAdd, err := filesystem.ResolvePaths(files, s.ignorelist)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
|
@ -100,7 +100,7 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) {
|
|||
return f.Name(), nil
|
||||
}
|
||||
|
||||
// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
|
||||
// TakeSnapshotFS takes a snapshot of the filesystem, avoiding directories in the ignorelist, and creates
|
||||
// a tarball of the changed files.
|
||||
func (s *Snapshotter) TakeSnapshotFS() (string, error) {
|
||||
f, err := ioutil.TempFile(snapshotPathPrefix, "")
|
||||
|
|
@ -139,9 +139,9 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
|
|||
|
||||
godirwalk.Walk(s.directory, &godirwalk.Options{
|
||||
Callback: func(path string, ent *godirwalk.Dirent) error {
|
||||
if util.IsInWhitelist(path) {
|
||||
if util.IsInIgnoreList(path) {
|
||||
if util.IsDestDir(path) {
|
||||
logrus.Tracef("Skipping paths under %s, as it is a whitelisted directory", path)
|
||||
logrus.Tracef("Skipping paths under %s, as it is a ignored directory", path)
|
||||
|
||||
return filepath.SkipDir
|
||||
}
|
||||
|
|
@ -158,7 +158,8 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
|
|||
)
|
||||
timing.DefaultRun.Stop(timer)
|
||||
|
||||
resolvedFiles, err := filesystem.ResolvePaths(foundPaths, s.whitelist)
|
||||
timer = timing.Start("Resolving Paths")
|
||||
resolvedFiles, err := filesystem.ResolvePaths(foundPaths, s.ignorelist)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
|
@ -192,8 +193,8 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
|
|||
|
||||
filesToAdd := []string{}
|
||||
for path := range resolvedMemFs {
|
||||
if util.CheckWhitelist(path) {
|
||||
logrus.Tracef("Not adding %s to layer, as it's whitelisted", path)
|
||||
if util.CheckIgnoreList(path) {
|
||||
logrus.Tracef("Not adding %s to layer, as it's ignored", path)
|
||||
continue
|
||||
}
|
||||
// Only add changed files.
|
||||
|
|
@ -207,6 +208,7 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
|
|||
}
|
||||
}
|
||||
|
||||
timing.DefaultRun.Stop(timer)
|
||||
sort.Strings(filesToAdd)
|
||||
// Add files to the layered map
|
||||
for _, file := range filesToAdd {
|
||||
|
|
|
|||
|
|
@ -44,35 +44,36 @@ import (
|
|||
const DoNotChangeUID = -1
|
||||
const DoNotChangeGID = -1
|
||||
|
||||
type WhitelistEntry struct {
|
||||
type IgnoreListEntry struct {
|
||||
Path string
|
||||
PrefixMatchOnly bool
|
||||
}
|
||||
|
||||
var initialWhitelist = []WhitelistEntry{
|
||||
var initialIgnoreList = []IgnoreListEntry{
|
||||
{
|
||||
Path: config.KanikoDir,
|
||||
PrefixMatchOnly: false,
|
||||
},
|
||||
{
|
||||
// similarly, we whitelist /etc/mtab, since there is no way to know if the file was mounted or came
|
||||
// similarly, we ignore /etc/mtab, since there is no way to know if the file was mounted or came
|
||||
// from the base image
|
||||
Path: "/etc/mtab",
|
||||
PrefixMatchOnly: false,
|
||||
},
|
||||
{
|
||||
// we whitelist /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory.
|
||||
// we ignore /tmp/apt-key-gpghome, since the apt keys are added temporarily in this directory.
|
||||
// from the base image
|
||||
Path: "/tmp/apt-key-gpghome",
|
||||
PrefixMatchOnly: true,
|
||||
},
|
||||
}
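For orientation: `PrefixMatchOnly` entries (such as volume paths added by `AddVolumePathToIgnoreList` further down) cause everything underneath the path to be skipped by `CheckIgnoreList`, while mount points from mountinfo are added with exact-path semantics. A small in-package sketch; the volume path is hypothetical:

```go
// Inside pkg/util (sketch): a VOLUME path is added with PrefixMatchOnly: true,
// and CheckIgnoreList then skips anything under it when walking the filesystem.
ignorelist = append(ignorelist, IgnoreListEntry{
	Path:            "/var/lib/mydata", // hypothetical VOLUME path
	PrefixMatchOnly: true,
})

if CheckIgnoreList("/var/lib/mydata/cache/file.db") {
	// files under the ignored volume are not added to the snapshot layer
}
```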
|
||||
|
||||
var whitelist = initialWhitelist
|
||||
var ignorelist = initialIgnoreList
|
||||
|
||||
var volumes = []string{}
|
||||
|
||||
var excluded []string
|
||||
var IsFirstStage = true
|
||||
|
||||
type ExtractFunction func(string, *tar.Header, io.Reader) error
|
||||
|
||||
|
|
@ -83,8 +84,8 @@ type FSConfig struct {
|
|||
|
||||
type FSOpt func(*FSConfig)
|
||||
|
||||
func Whitelist() []WhitelistEntry {
|
||||
return whitelist
|
||||
func IgnoreList() []IgnoreListEntry {
|
||||
return ignorelist
|
||||
}
|
||||
|
||||
func IncludeWhiteout() FSOpt {
|
||||
|
|
@ -125,11 +126,11 @@ func GetFSFromLayers(root string, layers []v1.Layer, opts ...FSOpt) ([]string, e
|
|||
return nil, errors.New("must supply an extract function")
|
||||
}
|
||||
|
||||
if err := DetectFilesystemWhitelist(config.WhitelistPath); err != nil {
|
||||
if err := DetectFilesystemIgnoreList(config.IgnoreListPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.Debugf("Mounted directories: %v", whitelist)
|
||||
logrus.Debugf("Mounted directories: %v", ignorelist)
|
||||
|
||||
extractedFiles := []string{}
|
||||
for i, l := range layers {
|
||||
|
|
@ -194,19 +195,19 @@ func DeleteFilesystem() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if CheckWhitelist(path) {
|
||||
if CheckIgnoreList(path) {
|
||||
if !isExist(path) {
|
||||
logrus.Debugf("Path %s whitelisted, but not exists", path)
|
||||
logrus.Debugf("Path %s ignored, but not exists", path)
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
logrus.Debugf("Not deleting %s, as it's whitelisted", path)
|
||||
logrus.Debugf("Not deleting %s, as it's ignored", path)
|
||||
return nil
|
||||
}
|
||||
if childDirInWhitelist(path) {
|
||||
logrus.Debugf("Not deleting %s, as it contains a whitelisted path", path)
|
||||
if childDirInIgnoreList(path) {
|
||||
logrus.Debugf("Not deleting %s, as it contains a ignored path", path)
|
||||
return nil
|
||||
}
|
||||
if path == config.RootDir {
|
||||
|
|
@ -224,9 +225,9 @@ func isExist(path string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ChildDirInWhitelist returns true if there is a child file or directory of the path in the whitelist
|
||||
func childDirInWhitelist(path string) bool {
|
||||
for _, d := range whitelist {
|
||||
// childDirInIgnoreList returns true if there is a child file or directory of the path in the ignorelist
|
||||
func childDirInIgnoreList(path string) bool {
|
||||
for _, d := range ignorelist {
|
||||
if HasFilepathPrefix(d.Path, path, d.PrefixMatchOnly) {
|
||||
return true
|
||||
}
|
||||
|
|
@ -267,8 +268,8 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if CheckWhitelist(abs) && !checkWhitelistRoot(dest) {
|
||||
logrus.Debugf("Not adding %s because it is whitelisted", path)
|
||||
if CheckIgnoreList(abs) && !checkIgnoreListRoot(dest) {
|
||||
logrus.Debugf("Not adding %s because it is ignored", path)
|
||||
return nil
|
||||
}
|
||||
switch hdr.Typeflag {
|
||||
|
|
@ -324,8 +325,8 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if CheckWhitelist(abs) {
|
||||
logrus.Tracef("skipping symlink from %s to %s because %s is whitelisted", hdr.Linkname, path, hdr.Linkname)
|
||||
if CheckIgnoreList(abs) {
|
||||
logrus.Tracef("skipping symlink from %s to %s because %s is ignored", hdr.Linkname, path, hdr.Linkname)
|
||||
return nil
|
||||
}
|
||||
// The base directory for a link may not exist before it is created.
|
||||
|
|
@ -364,11 +365,11 @@ func ExtractFile(dest string, hdr *tar.Header, tr io.Reader) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func IsInWhitelist(path string) bool {
|
||||
return IsInProvidedWhitelist(path, whitelist)
|
||||
func IsInIgnoreList(path string) bool {
|
||||
return IsInProvidedIgnoreList(path, ignorelist)
|
||||
}
|
||||
|
||||
func IsInProvidedWhitelist(path string, wl []WhitelistEntry) bool {
|
||||
func IsInProvidedIgnoreList(path string, wl []IgnoreListEntry) bool {
|
||||
for _, entry := range wl {
|
||||
if !entry.PrefixMatchOnly && path == entry.Path {
|
||||
return true
|
||||
|
|
@ -377,8 +378,8 @@ func IsInProvidedWhitelist(path string, wl []WhitelistEntry) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func CheckWhitelist(path string) bool {
|
||||
for _, wl := range whitelist {
|
||||
func CheckIgnoreList(path string) bool {
|
||||
for _, wl := range ignorelist {
|
||||
if HasFilepathPrefix(path, wl.Path, wl.PrefixMatchOnly) {
|
||||
return true
|
||||
}
|
||||
|
|
@ -387,21 +388,21 @@ func CheckWhitelist(path string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
func checkWhitelistRoot(root string) bool {
|
||||
func checkIgnoreListRoot(root string) bool {
|
||||
if root == config.RootDir {
|
||||
return false
|
||||
}
|
||||
return CheckWhitelist(root)
|
||||
return CheckIgnoreList(root)
|
||||
}
|
||||
|
||||
// Get whitelist from roots of mounted files
|
||||
// Get ignorelist from roots of mounted files
|
||||
// Each line of /proc/self/mountinfo is in the form:
|
||||
// 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
// (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
// Where (5) is the mount point relative to the process's root
|
||||
// From: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
|
||||
func DetectFilesystemWhitelist(path string) error {
|
||||
whitelist = initialWhitelist
|
||||
func DetectFilesystemIgnoreList(path string) error {
|
||||
ignorelist = initialIgnoreList
|
||||
volumes = []string{}
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
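For reference, a sketch of the parsing this function performs on the format documented above: split each mountinfo line on whitespace and treat field 5 as a mount point to ignore. The helper name is illustrative, not kaniko's API:

```go
package util

import (
	"bufio"
	"io"
	"strings"
)

// parseMountPoints pulls the mount-point column (field 5) out of
// /proc/self/mountinfo-style content, which is what
// DetectFilesystemIgnoreList appends to the ignore list.
func parseMountPoints(r io.Reader) []string {
	var mounts []string
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		fields := strings.Fields(scanner.Text())
		if len(fields) < 5 {
			continue
		}
		// Skip the root directory itself (kaniko compares against config.RootDir).
		if mp := fields[4]; mp != "/" {
			mounts = append(mounts, mp)
		}
	}
	return mounts
}
```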
|
||||
|
|
@@ -425,7 +426,7 @@ func DetectFilesystemWhitelist(path string) error {
		}
		if lineArr[4] != config.RootDir {
			logrus.Tracef("Appending %s from line: %s", lineArr[4], line)
-			whitelist = append(whitelist, WhitelistEntry{
+			ignorelist = append(ignorelist, IgnoreListEntry{
				Path: lineArr[4],
				PrefixMatchOnly: false,
			})

@@ -447,7 +448,7 @@ func RelativeFiles(fp string, root string) ([]string, error) {
		if err != nil {
			return err
		}
-		if CheckWhitelist(path) && !HasFilepathPrefix(path, root, false) {
+		if CheckIgnoreList(path) && !HasFilepathPrefix(path, root, false) {
			return nil
		}
		relPath, err := filepath.Rel(root, path)

@@ -521,10 +522,10 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid
	return setFilePermissions(path, perm, int(uid), int(gid))
}

-// AddVolumePath adds the given path to the volume whitelist.
-func AddVolumePathToWhitelist(path string) {
-	logrus.Infof("adding volume %s to whitelist", path)
-	whitelist = append(whitelist, WhitelistEntry{
+// AddVolumePath adds the given path to the volume ignorelist.
+func AddVolumePathToIgnoreList(path string) {
+	logrus.Infof("adding volume %s to ignorelist", path)
+	ignorelist = append(ignorelist, IgnoreListEntry{
		Path: path,
		PrefixMatchOnly: true,
	})

@@ -678,6 +679,10 @@ func GetExcludedFiles(dockerfilepath string, buildcontext string) error {

// ExcludeFile returns true if the .dockerignore specified this file should be ignored
func ExcludeFile(path, buildcontext string) bool {
+	// Apply dockerfile excludes for first stage only
+	if !IsFirstStage {
+		return false
+	}
	if HasFilepathPrefix(path, buildcontext, false) {
		var err error
		path, err = filepath.Rel(buildcontext, path)

@@ -856,13 +861,13 @@ func createParentDirectory(path string) error {
	return nil
}

-// UpdateInitialWhitelist will add /var/run to whitelisted paths if
-func UpdateWhitelist(whitelistVarRun bool) {
-	if !whitelistVarRun {
+// UpdateInitialIgnoreList will add /var/run to ignored paths if
+func UpdateInitialIgnoreList(ignoreVarRun bool) {
+	if !ignoreVarRun {
		return
	}
-	logrus.Trace("Adding /var/run to initialWhitelist ")
-	initialWhitelist = append(initialWhitelist, WhitelistEntry{
+	logrus.Trace("Adding /var/run to initialIgnoreList ")
+	initialIgnoreList = append(initialIgnoreList, IgnoreListEntry{
		// /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar
		// which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist
		// in the image with no way to tell if it came from the base image or not.
@@ -38,7 +38,7 @@ import (
	"github.com/google/go-containerregistry/pkg/v1/types"
)

-func Test_DetectFilesystemWhitelist(t *testing.T) {
+func Test_DetectFilesystemSkiplist(t *testing.T) {
	testDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("Error creating tempdir: %s", err)

@@ -58,8 +58,8 @@ func Test_DetectFilesystemWhitelist(t *testing.T) {
		t.Fatalf("Error writing file contents to %s: %s", path, err)
	}

-	err = DetectFilesystemWhitelist(path)
-	expectedWhitelist := []WhitelistEntry{
+	err = DetectFilesystemIgnoreList(path)
+	expectedSkiplist := []IgnoreListEntry{
		{"/kaniko", false},
		{"/proc", false},
		{"/dev", false},

@@ -68,14 +68,14 @@ func Test_DetectFilesystemWhitelist(t *testing.T) {
		{"/etc/mtab", false},
		{"/tmp/apt-key-gpghome", true},
	}
-	actualWhitelist := whitelist
-	sort.Slice(actualWhitelist, func(i, j int) bool {
-		return actualWhitelist[i].Path < actualWhitelist[j].Path
+	actualSkiplist := ignorelist
+	sort.Slice(actualSkiplist, func(i, j int) bool {
+		return actualSkiplist[i].Path < actualSkiplist[j].Path
	})
-	sort.Slice(expectedWhitelist, func(i, j int) bool {
-		return expectedWhitelist[i].Path < expectedWhitelist[j].Path
+	sort.Slice(expectedSkiplist, func(i, j int) bool {
+		return expectedSkiplist[i].Path < expectedSkiplist[j].Path
	})
-	testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist)
+	testutil.CheckErrorAndDeepEqual(t, false, err, expectedSkiplist, actualSkiplist)
}

var tests = []struct {

@@ -251,10 +251,10 @@ func Test_ParentDirectoriesWithoutLeadingSlash(t *testing.T) {
	}
}

-func Test_CheckWhitelist(t *testing.T) {
+func Test_CheckIgnoreList(t *testing.T) {
	type args struct {
-		path string
-		whitelist []WhitelistEntry
+		path string
+		ignorelist []IgnoreListEntry
	}
	tests := []struct {
		name string

@@ -262,56 +262,56 @@ func Test_CheckWhitelist(t *testing.T) {
		want bool
	}{
		{
-			name: "file whitelisted",
+			name: "file ignored",
			args: args{
-				path: "/foo",
-				whitelist: []WhitelistEntry{{"/foo", false}},
+				path: "/foo",
+				ignorelist: []IgnoreListEntry{{"/foo", false}},
			},
			want: true,
		},
		{
-			name: "directory whitelisted",
+			name: "directory ignored",
			args: args{
-				path: "/foo/bar",
-				whitelist: []WhitelistEntry{{"/foo", false}},
+				path: "/foo/bar",
+				ignorelist: []IgnoreListEntry{{"/foo", false}},
			},
			want: true,
		},
		{
-			name: "grandparent whitelisted",
+			name: "grandparent ignored",
			args: args{
-				path: "/foo/bar/baz",
-				whitelist: []WhitelistEntry{{"/foo", false}},
+				path: "/foo/bar/baz",
+				ignorelist: []IgnoreListEntry{{"/foo", false}},
			},
			want: true,
		},
		{
-			name: "sibling whitelisted",
+			name: "sibling ignored",
			args: args{
-				path: "/foo/bar/baz",
-				whitelist: []WhitelistEntry{{"/foo/bat", false}},
+				path: "/foo/bar/baz",
+				ignorelist: []IgnoreListEntry{{"/foo/bat", false}},
			},
			want: false,
		},
		{
			name: "prefix match only ",
			args: args{
-				path: "/tmp/apt-key-gpghome.xft/gpg.key",
-				whitelist: []WhitelistEntry{{"/tmp/apt-key-gpghome.*", true}},
+				path: "/tmp/apt-key-gpghome.xft/gpg.key",
+				ignorelist: []IgnoreListEntry{{"/tmp/apt-key-gpghome.*", true}},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			original := whitelist
+			original := ignorelist
			defer func() {
-				whitelist = original
+				ignorelist = original
			}()
-			whitelist = tt.args.whitelist
-			got := CheckWhitelist(tt.args.path)
+			ignorelist = tt.args.ignorelist
+			got := CheckIgnoreList(tt.args.path)
			if got != tt.want {
-				t.Errorf("CheckWhitelist() = %v, want %v", got, tt.want)
+				t.Errorf("CheckIgnoreList() = %v, want %v", got, tt.want)
			}
		})
	}

@@ -879,10 +879,10 @@ func TestCopySymlink(t *testing.T) {
	}
}

-func Test_childDirInWhitelist(t *testing.T) {
+func Test_childDirInSkiplist(t *testing.T) {
	type args struct {
-		path string
-		whitelist []WhitelistEntry
+		path string
+		ignorelist []IgnoreListEntry
	}
	tests := []struct {
		name string

@@ -890,17 +890,17 @@ func Test_childDirInWhitelist(t *testing.T) {
		want bool
	}{
		{
-			name: "not in whitelist",
+			name: "not in ignorelist",
			args: args{
				path: "/foo",
			},
			want: false,
		},
		{
-			name: "child in whitelist",
+			name: "child in ignorelist",
			args: args{
				path: "/foo",
-				whitelist: []WhitelistEntry{
+				ignorelist: []IgnoreListEntry{
					{
						Path: "/foo/bar",
					},

@@ -909,16 +909,16 @@ func Test_childDirInWhitelist(t *testing.T) {
			want: true,
		},
	}
-	oldWhitelist := whitelist
+	oldIgnoreList := ignorelist
	defer func() {
-		whitelist = oldWhitelist
+		ignorelist = oldIgnoreList
	}()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			whitelist = tt.args.whitelist
-			if got := childDirInWhitelist(tt.args.path); got != tt.want {
-				t.Errorf("childDirInWhitelist() = %v, want %v", got, tt.want)
+			ignorelist = tt.args.ignorelist
+			if got := childDirInIgnoreList(tt.args.path); got != tt.want {
+				t.Errorf("childDirInIgnoreList() = %v, want %v", got, tt.want)
			}
		})
	}

@@ -1315,16 +1315,16 @@ func assertGetFSFromLayers(
	}
}

-func TestUpdateWhitelist(t *testing.T) {
+func TestUpdateSkiplist(t *testing.T) {
	tests := []struct {
-		name string
-		whitelistVarRun bool
-		expected []WhitelistEntry
+		name string
+		skipVarRun bool
+		expected []IgnoreListEntry
	}{
		{
-			name: "var/run whitelisted",
-			whitelistVarRun: true,
-			expected: []WhitelistEntry{
+			name: "var/run ignored",
+			skipVarRun: true,
+			expected: []IgnoreListEntry{
				{
					Path: "/kaniko",
					PrefixMatchOnly: false,

@@ -1344,8 +1344,8 @@ func TestUpdateWhitelist(t *testing.T) {
			},
		},
		{
-			name: "var/run not whitelisted",
-			expected: []WhitelistEntry{
+			name: "var/run not ignored",
+			expected: []IgnoreListEntry{
				{
					Path: "/kaniko",
					PrefixMatchOnly: false,

@@ -1363,16 +1363,16 @@ func TestUpdateWhitelist(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			original := initialWhitelist
-			defer func() { initialWhitelist = original }()
-			UpdateWhitelist(tt.whitelistVarRun)
+			original := initialIgnoreList
+			defer func() { initialIgnoreList = original }()
+			UpdateInitialIgnoreList(tt.skipVarRun)
			sort.Slice(tt.expected, func(i, j int) bool {
				return tt.expected[i].Path < tt.expected[j].Path
			})
-			sort.Slice(initialWhitelist, func(i, j int) bool {
-				return initialWhitelist[i].Path < initialWhitelist[j].Path
+			sort.Slice(initialIgnoreList, func(i, j int) bool {
+				return initialIgnoreList[i].Path < initialIgnoreList[j].Path
			})
-			testutil.CheckDeepEqual(t, tt.expected, initialWhitelist)
+			testutil.CheckDeepEqual(t, tt.expected, initialIgnoreList)
		})
	}
}
@@ -0,0 +1,82 @@
/*
Copyright 2020 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"crypto/tls"
	"crypto/x509"

	"io/ioutil"
	"net/http"

	"github.com/GoogleContainerTools/kaniko/pkg/config"
	"github.com/sirupsen/logrus"
)

type CertPool interface {
	value() *x509.CertPool
	append(path string) error
}

type X509CertPool struct {
	inner x509.CertPool
}

func (p *X509CertPool) value() *x509.CertPool {
	return &p.inner
}

func (p *X509CertPool) append(path string) error {
	pem, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	p.inner.AppendCertsFromPEM(pem)
	return nil
}

var systemCertLoader CertPool

func init() {
	systemCertPool, err := x509.SystemCertPool()
	if err != nil {
		logrus.Warn("Failed to load system cert pool. Loading empty one instead.")
		systemCertPool = x509.NewCertPool()
	}
	systemCertLoader = &X509CertPool{
		inner: *systemCertPool,
	}
}

func MakeTransport(opts *config.KanikoOptions, registryName string) http.RoundTripper {
	// Create a transport to set our user-agent.
	var tr http.RoundTripper = http.DefaultTransport.(*http.Transport).Clone()
	if opts.SkipTLSVerify || opts.SkipTLSVerifyRegistries.Contains(registryName) {
		tr.(*http.Transport).TLSClientConfig = &tls.Config{
			InsecureSkipVerify: true,
		}
	} else if certificatePath := opts.RegistriesCertificates[registryName]; certificatePath != "" {
		if err := systemCertLoader.append(certificatePath); err != nil {
			logrus.WithError(err).Warnf("Failed to load certificate %s for %s\n", certificatePath, registryName)
		} else {
			tr.(*http.Transport).TLSClientConfig = &tls.Config{
				RootCAs: systemCertLoader.value(),
			}
		}
	}
	return tr
}
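MakeTransport only builds an http.RoundTripper; the caller still has to hand it to whatever registry client performs the push or pull. A hedged sketch of that wiring (the setup below is assumed for illustration and is not part of this change; remote.WithTransport is go-containerregistry's standard option for swapping the transport, and the registry name and certificate path are the placeholder values from the test file that follows):

```go
package main

import (
	"fmt"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/config"
	"github.com/GoogleContainerTools/kaniko/pkg/util"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical options: trust a self-signed certificate for one registry.
	opts := &config.KanikoOptions{
		RegistriesCertificates: map[string]string{
			"my.registry.name": "/path/to/the/certificate.cert",
		},
	}

	ref, err := name.ParseReference("my.registry.name/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}

	// Route registry traffic through the transport built from the options above.
	tr := util.MakeTransport(opts, ref.Context().RegistryStr())
	img, err := remote.Image(ref, remote.WithTransport(tr))
	if err != nil {
		log.Fatal(err)
	}
	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(digest)
}
```

The CertPool interface, rather than a bare *x509.CertPool, is what allows the test file below to substitute a mockedCertPool for the real system pool.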
@@ -0,0 +1,110 @@
/*
Copyright 2020 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"testing"

	"github.com/GoogleContainerTools/kaniko/pkg/config"
)

type mockedCertPool struct {
	certificatesPath []string
}

func (m *mockedCertPool) value() *x509.CertPool {
	return &x509.CertPool{}
}

func (m *mockedCertPool) append(path string) error {
	m.certificatesPath = append(m.certificatesPath, path)
	return nil
}

func Test_makeTransport(t *testing.T) {
	registryName := "my.registry.name"

	tests := []struct {
		name  string
		opts  *config.KanikoOptions
		check func(*tls.Config, *mockedCertPool)
	}{
		{
			name: "SkipTLSVerify set",
			opts: &config.KanikoOptions{SkipTLSVerify: true},
			check: func(config *tls.Config, pool *mockedCertPool) {
				if !config.InsecureSkipVerify {
					t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerify set")
				}
			},
		},
		{
			name: "SkipTLSVerifyRegistries set with expected registry",
			opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{registryName}},
			check: func(config *tls.Config, pool *mockedCertPool) {
				if !config.InsecureSkipVerify {
					t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify not set while SkipTLSVerifyRegistries set with registry name")
				}
			},
		},
		{
			name: "SkipTLSVerifyRegistries set with other registry",
			opts: &config.KanikoOptions{SkipTLSVerifyRegistries: []string{fmt.Sprintf("other.%s", registryName)}},
			check: func(config *tls.Config, pool *mockedCertPool) {
				if config.InsecureSkipVerify {
					t.Errorf("makeTransport().TLSClientConfig.InsecureSkipVerify set while SkipTLSVerifyRegistries not set with registry name")
				}
			},
		},
		{
			name: "RegistriesCertificates set for registry",
			opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{registryName: "/path/to/the/certificate.cert"}},
			check: func(config *tls.Config, pool *mockedCertPool) {
				if len(pool.certificatesPath) != 1 || pool.certificatesPath[0] != "/path/to/the/certificate.cert" {
					t.Errorf("makeTransport().RegistriesCertificates certificate not appended to system certificates")
				}
			},
		},
		{
			name: "RegistriesCertificates set for another registry",
			opts: &config.KanikoOptions{RegistriesCertificates: map[string]string{fmt.Sprintf("other.%s=", registryName): "/path/to/the/certificate.cert"}},
			check: func(config *tls.Config, pool *mockedCertPool) {
				if len(pool.certificatesPath) != 0 {
					t.Errorf("makeTransport().RegistriesCertificates certificate appended to system certificates while added for other registry")
				}
			},
		},
	}
	savedSystemCertLoader := systemCertLoader
	defer func() { systemCertLoader = savedSystemCertLoader }()
	for _, tt := range tests {
		var certificatesPath []string
		certPool := &mockedCertPool{
			certificatesPath: certificatesPath,
		}
		systemCertLoader = certPool
		t.Run(tt.name, func(t *testing.T) {
			tr := MakeTransport(tt.opts, registryName)
			tt.check(tr.(*http.Transport).TLSClientConfig, certPool)
		})

	}
}
@@ -23,14 +23,11 @@ import (
	"io"
	"io/ioutil"
	"os"
-	"runtime"
	"strconv"
	"sync"
	"syscall"

	"github.com/minio/highwayhash"
-
-	v1 "github.com/google/go-containerregistry/pkg/v1"
)

// Hasher returns a hash function, used in snapshotting to determine if a file has changed

@@ -128,14 +125,6 @@ func SHA256(r io.Reader) (string, error) {
	return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil
}

-// CurrentPlatform returns the v1.Platform on which the code runs
-func currentPlatform() v1.Platform {
-	return v1.Platform{
-		OS: runtime.GOOS,
-		Architecture: runtime.GOARCH,
-	}
-}
-
// GetInputFrom returns Reader content
func GetInputFrom(r io.Reader) ([]byte, error) {
	output, err := ioutil.ReadAll(r)