merged master, fixed conflicts
commit ee9aa954ac

@@ -86,7 +86,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) {
 	RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting")
 	RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
 	RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePush, "insecure", "", false, "Push to insecure registry using plain HTTP")
-	RootCmd.PersistentFlags().BoolVarP(&opts.SkipTlsVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
+	RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
 	RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing")
 	RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.")
 	RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible")

@@ -145,7 +145,7 @@ func resolveSourceContext() error {
 			opts.SrcContext = opts.Bucket
 		}
 	}
-	// if no prefix use Google Cloud Storage as default for backwards compability
+	// if no prefix use Google Cloud Storage as default for backwards compatibility
 	contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext)
 	if err != nil {
 		return err

@@ -18,12 +18,14 @@ from __future__ import print_function

 import argparse
 import glob
 import json
 import mmap
 import os
 import re
 import sys

+SKIPPED_DIRS = ["Godeps", "third_party", ".git", "vendor", "examples", "testdata"]
+SKIPPED_FILES = ["install_golint.sh"]

 parser = argparse.ArgumentParser()
 parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')

@@ -71,7 +73,7 @@ def file_passes(filename, refs, regexs):
         (data, found) = p.subn("", data, 1)

     # remove shebang from the top of shell files
-    if extension == "sh":
+    elif extension == "sh":
         p = regexs["shebang"]
         (data, found) = p.subn("", data, 1)

@@ -105,17 +107,11 @@ def file_passes(filename, refs, regexs):
 def file_extension(filename):
     return os.path.splitext(filename)[1].split(".")[-1].lower()

-skipped_dirs = ['Godeps', 'third_party', '.git', "vendor", "differs/testDirs/pipTests"]
-
 def normalize_files(files):
     newfiles = []
-    for pathname in files:
-        if any(x in pathname for x in skipped_dirs):
-            continue
-        newfiles.append(pathname)
-    for i, pathname in enumerate(newfiles):
+    for i, pathname in enumerate(files):
         if not os.path.isabs(pathname):
-            newfiles[i] = os.path.join(args.rootdir, pathname)
+            newfiles.append(os.path.join(args.rootdir, pathname))
     return newfiles

 def get_files(extensions):

@@ -124,17 +120,14 @@ def get_files(extensions):
         files = args.filenames
     else:
         for root, dirs, walkfiles in os.walk(args.rootdir):
-            # don't visit certain dirs. This is just a performance improvement
-            # as we would prune these later in normalize_files(). But doing it
-            # cuts down the amount of filesystem walking we do and cuts down
-            # the size of the file list
-            for d in skipped_dirs:
+            for d in SKIPPED_DIRS:
                 if d in dirs:
                     dirs.remove(d)

             for name in walkfiles:
-                pathname = os.path.join(root, name)
-                files.append(pathname)
+                if name not in SKIPPED_FILES:
+                    pathname = os.path.join(root, name)
+                    files.append(pathname)

     files = normalize_files(files)
     outfiles = []

@@ -1,17 +0,0 @@
-{
-  "Vendor": true,
-  "EnableGC": true,
-  "Debug": false,
-  "Sort": ["linter", "severity", "path"],
-  "Enable": [
-    "deadcode",
-    "gofmt",
-    "golint",
-    "gosimple",
-    "ineffassign",
-    "vet"
-  ],
-
-  "LineLength": 200
-}

@@ -0,0 +1,388 @@
+#!/bin/sh
+set -e
+# Code generated by godownloader on 2018-06-05T12:04:55Z. DO NOT EDIT.
+#
+
+usage() {
+  this=$1
+  cat <<EOF
+$this: download go binaries for golangci/golangci-lint
+
+Usage: $this [-b] bindir [-d] [tag]
+  -b sets bindir or installation directory, Defaults to ./bin
+  -d turns on debug logging
+   [tag] is a tag from
+   https://github.com/golangci/golangci-lint/releases
+   If tag is missing, then the latest will be used.
+
+ Generated by godownloader
+  https://github.com/goreleaser/godownloader
+
+EOF
+  exit 2
+}
+
+parse_args() {
+  #BINDIR is ./bin unless set be ENV
+  # over-ridden by flag below
+
+  BINDIR=${BINDIR:-./bin}
+  while getopts "b:dh?" arg; do
+    case "$arg" in
+      b) BINDIR="$OPTARG" ;;
+      d) log_set_priority 10 ;;
+      h | \?) usage "$0" ;;
+    esac
+  done
+  shift $((OPTIND - 1))
+  TAG=$1
+}
+# this function wraps all the destructive operations
+# if a curl|bash cuts off the end of the script due to
+# network, either nothing will happen or will syntax error
+# out preventing half-done work
+execute() {
+  tmpdir=$(mktmpdir)
+  log_debug "downloading files into ${tmpdir}"
+  http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
+  http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
+  hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
+  srcdir="${tmpdir}/${NAME}"
+  rm -rf "${srcdir}"
+  (cd "${tmpdir}" && untar "${TARBALL}")
+  install -d "${BINDIR}"
+  for binexe in "golangci-lint" ; do
+    if [ "$OS" = "windows" ]; then
+      binexe="${binexe}.exe"
+    fi
+    install "${srcdir}/${binexe}" "${BINDIR}/"
+    log_info "installed ${BINDIR}/${binexe}"
+  done
+}
+is_supported_platform() {
+  platform=$1
+  found=1
+  case "$platform" in
+    darwin/amd64) found=0 ;;
+    darwin/386) found=0 ;;
+    windows/amd64) found=0 ;;
+    windows/386) found=0 ;;
+    linux/amd64) found=0 ;;
+    linux/386) found=0 ;;
+  esac
+  return $found
+}
+check_platform() {
+  if is_supported_platform "$PLATFORM"; then
+    # optional logging goes here
+    true
+  else
+    log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
+    exit 1
+  fi
+}
+tag_to_version() {
+  if [ -z "${TAG}" ]; then
+    log_info "checking GitHub for latest tag"
+  else
+    log_info "checking GitHub for tag '${TAG}'"
+  fi
+  REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
+  if test -z "$REALTAG"; then
+    log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
+    exit 1
+  fi
+  # if version starts with 'v', remove it
+  TAG="$REALTAG"
+  VERSION=${TAG#v}
+}
+adjust_format() {
+  # change format (tar.gz or zip) based on ARCH
+  case ${ARCH} in
+    windows) FORMAT=zip ;;
+  esac
+  true
+}
+adjust_os() {
+  # adjust archive name based on OS
+  true
+}
+adjust_arch() {
+  # adjust archive name based on ARCH
+  true
+}
+
+cat /dev/null <<EOF
+------------------------------------------------------------------------
+https://github.com/client9/shlib - portable posix shell functions
+Public domain - http://unlicense.org
+https://github.com/client9/shlib/blob/master/LICENSE.md
+but credit (and pull requests) appreciated.
+------------------------------------------------------------------------
+EOF
+is_command() {
+  command -v "$1" >/dev/null
+}
+echoerr() {
+  echo "$@" 1>&2
+}
+log_prefix() {
+  echo "$0"
+}
+_logp=6
+log_set_priority() {
+  _logp="$1"
+}
+log_priority() {
+  if test -z "$1"; then
+    echo "$_logp"
+    return
+  fi
+  [ "$1" -le "$_logp" ]
+}
+log_tag() {
+  case $1 in
+    0) echo "emerg" ;;
+    1) echo "alert" ;;
+    2) echo "crit" ;;
+    3) echo "err" ;;
+    4) echo "warning" ;;
+    5) echo "notice" ;;
+    6) echo "info" ;;
+    7) echo "debug" ;;
+    *) echo "$1" ;;
+  esac
+}
+log_debug() {
+  log_priority 7 || return 0
+  echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
+}
+log_info() {
+  log_priority 6 || return 0
+  echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
+}
+log_err() {
+  log_priority 3 || return 0
+  echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
+}
+log_crit() {
+  log_priority 2 || return 0
+  echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
+}
+uname_os() {
+  os=$(uname -s | tr '[:upper:]' '[:lower:]')
+  case "$os" in
+    msys_nt) os="windows" ;;
+  esac
+  echo "$os"
+}
+uname_arch() {
+  arch=$(uname -m)
+  case $arch in
+    x86_64) arch="amd64" ;;
+    x86) arch="386" ;;
+    i686) arch="386" ;;
+    i386) arch="386" ;;
+    aarch64) arch="arm64" ;;
+    armv5*) arch="armv5" ;;
+    armv6*) arch="armv6" ;;
+    armv7*) arch="armv7" ;;
+  esac
+  echo ${arch}
+}
+uname_os_check() {
+  os=$(uname_os)
+  case "$os" in
+    darwin) return 0 ;;
+    dragonfly) return 0 ;;
+    freebsd) return 0 ;;
+    linux) return 0 ;;
+    android) return 0 ;;
+    nacl) return 0 ;;
+    netbsd) return 0 ;;
+    openbsd) return 0 ;;
+    plan9) return 0 ;;
+    solaris) return 0 ;;
+    windows) return 0 ;;
+  esac
+  log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
+  return 1
+}
+uname_arch_check() {
+  arch=$(uname_arch)
+  case "$arch" in
+    386) return 0 ;;
+    amd64) return 0 ;;
+    arm64) return 0 ;;
+    armv5) return 0 ;;
+    armv6) return 0 ;;
+    armv7) return 0 ;;
+    ppc64) return 0 ;;
+    ppc64le) return 0 ;;
+    mips) return 0 ;;
+    mipsle) return 0 ;;
+    mips64) return 0 ;;
+    mips64le) return 0 ;;
+    s390x) return 0 ;;
+    amd64p32) return 0 ;;
+  esac
+  log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
+  return 1
+}
+untar() {
+  tarball=$1
+  case "${tarball}" in
+    *.tar.gz | *.tgz) tar -xzf "${tarball}" ;;
+    *.tar) tar -xf "${tarball}" ;;
+    *.zip) unzip "${tarball}" ;;
+    *)
+      log_err "untar unknown archive format for ${tarball}"
+      return 1
+      ;;
+  esac
+}
+mktmpdir() {
+  test -z "$TMPDIR" && TMPDIR="$(mktemp -d)"
+  mkdir -p "${TMPDIR}"
+  echo "${TMPDIR}"
+}
+http_download_curl() {
+  local_file=$1
+  source_url=$2
+  header=$3
+  if [ -z "$header" ]; then
+    code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
+  else
+    code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
+  fi
+  if [ "$code" != "200" ]; then
+    log_debug "http_download_curl received HTTP status $code"
+    return 1
+  fi
+  return 0
+}
+http_download_wget() {
+  local_file=$1
+  source_url=$2
+  header=$3
+  if [ -z "$header" ]; then
+    wget -q -O "$local_file" "$source_url"
+  else
+    wget -q --header "$header" -O "$local_file" "$source_url"
+  fi
+}
+http_download() {
+  log_debug "http_download $2"
+  if is_command curl; then
+    http_download_curl "$@"
+    return
+  elif is_command wget; then
+    http_download_wget "$@"
+    return
+  fi
+  log_crit "http_download unable to find wget or curl"
+  return 1
+}
+http_copy() {
+  tmp=$(mktemp)
+  http_download "${tmp}" "$1" "$2" || return 1
+  body=$(cat "$tmp")
+  rm -f "${tmp}"
+  echo "$body"
+}
+github_release() {
+  owner_repo=$1
+  version=$2
+  test -z "$version" && version="latest"
+  giturl="https://github.com/${owner_repo}/releases/${version}"
+  json=$(http_copy "$giturl" "Accept:application/json")
+  test -z "$json" && return 1
+  version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
+  test -z "$version" && return 1
+  echo "$version"
+}
+hash_sha256() {
+  TARGET=${1:-/dev/stdin}
+  if is_command gsha256sum; then
+    hash=$(gsha256sum "$TARGET") || return 1
+    echo "$hash" | cut -d ' ' -f 1
+  elif is_command sha256sum; then
+    hash=$(sha256sum "$TARGET") || return 1
+    echo "$hash" | cut -d ' ' -f 1
+  elif is_command shasum; then
+    hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
+    echo "$hash" | cut -d ' ' -f 1
+  elif is_command openssl; then
+    hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
+    echo "$hash" | cut -d ' ' -f a
+  else
+    log_crit "hash_sha256 unable to find command to compute sha-256 hash"
+    return 1
+  fi
+}
+hash_sha256_verify() {
+  TARGET=$1
+  checksums=$2
+  if [ -z "$checksums" ]; then
+    log_err "hash_sha256_verify checksum file not specified in arg2"
+    return 1
+  fi
+  BASENAME=${TARGET##*/}
+  want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
+  if [ -z "$want" ]; then
+    log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
+    return 1
+  fi
+  got=$(hash_sha256 "$TARGET")
+  if [ "$want" != "$got" ]; then
+    log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
+    return 1
+  fi
+}
+cat /dev/null <<EOF
+------------------------------------------------------------------------
+End of functions from https://github.com/client9/shlib
+------------------------------------------------------------------------
+EOF
+
+PROJECT_NAME="golangci-lint"
+OWNER=golangci
+REPO="golangci-lint"
+BINARY=golangci-lint
+FORMAT=tar.gz
+OS=$(uname_os)
+ARCH=$(uname_arch)
+PREFIX="$OWNER/$REPO"
+
+# use in logging routines
+log_prefix() {
+  echo "$PREFIX"
+}
+PLATFORM="${OS}/${ARCH}"
+GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
+
+uname_os_check "$OS"
+uname_arch_check "$ARCH"
+
+parse_args "$@"
+
+check_platform
+
+tag_to_version
+
+adjust_format
+
+adjust_os
+
+adjust_arch
+
+log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
+
+NAME=${BINARY}-${VERSION}-${OS}-${ARCH}
+TARBALL=${NAME}.${FORMAT}
+TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
+CHECKSUM=${PROJECT_NAME}-${VERSION}-checksums.txt
+CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
+
+
+execute

@@ -14,22 +14,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.


 #!/bin/bash
 set -e -o pipefail

-SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

-install_gometalinter() {
-  echo "Installing gometalinter.v2"
-  go get -u gopkg.in/alecthomas/gometalinter.v2
-  gometalinter.v2 --install
-}
-
-if ! [ -x "$(command -v gometalinter.v2)" ]; then
-  install_gometalinter
+if ! [ -x "$(command -v golangci-lint)" ]; then
+  echo "Installing GolangCI-Lint"
+  ${DIR}/install_golint.sh -b $GOPATH/bin v1.9.3
 fi

-gometalinter.v2 \
-  ${GOMETALINTER_OPTS:--deadine 5m} \
-  --config $SCRIPTDIR/gometalinter.json ./...
+golangci-lint run \
+  --no-config \
+  -E goconst \
+  -E goimports \
+  -E golint \
+  -E interfacer \
+  -E maligned \
+  -E misspell \
+  -E unconvert \
+  -E unparam \
+  -D errcheck \
+  -D gas

@@ -71,7 +71,6 @@ func initGCPConfig() *gcpConfig {
 }

 const (
-	ubuntuImage        = "ubuntu"
 	daemonPrefix       = "daemon://"
 	dockerfilesPath    = "dockerfiles"
 	emptyContainerDiff = `[

@@ -240,30 +239,27 @@ func TestLayers(t *testing.T) {
 			kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
 			pullCmd := exec.Command("docker", "pull", kanikoImage)
 			RunCommand(pullCmd, t)
-			if err := checkLayers(t, dockerImage, kanikoImage, offset[dockerfile]); err != nil {
-				t.Error(err)
-				t.Fail()
-			}
+			checkLayers(t, dockerImage, kanikoImage, offset[dockerfile])
 		})
 	}
 }

-func checkLayers(t *testing.T, image1, image2 string, offset int) error {
+func checkLayers(t *testing.T, image1, image2 string, offset int) {
+	t.Helper()
 	img1, err := getImageDetails(image1)
 	if err != nil {
-		return fmt.Errorf("Couldn't get details from image reference for (%s): %s", image1, err)
+		t.Fatalf("Couldn't get details from image reference for (%s): %s", image1, err)
 	}

 	img2, err := getImageDetails(image2)
 	if err != nil {
-		return fmt.Errorf("Couldn't get details from image reference for (%s): %s", image2, err)
+		t.Fatalf("Couldn't get details from image reference for (%s): %s", image2, err)
 	}

 	actualOffset := int(math.Abs(float64(img1.numLayers - img2.numLayers)))
 	if actualOffset != offset {
-		return fmt.Errorf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
+		t.Fatalf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
 	}
-	return nil
 }

 func getImageDetails(image string) (*imageDetails, error) {

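Note: the t.Helper() call added above marks checkLayers as a test helper, so a t.Fatalf failure is reported at the caller's file:line rather than inside the helper. A minimal self-contained sketch of the pattern, with a hypothetical helper that is not part of this commit:

package example

import "testing"

// mustEqual is a hypothetical assertion helper. Because of t.Helper(),
// a failure is attributed to the line in TestExample that called it.
func mustEqual(t *testing.T, got, want int) {
	t.Helper()
	if got != want {
		t.Fatalf("got %d, want %d", got, want)
	}
}

func TestExample(t *testing.T) {
	mustEqual(t, 1+1, 2)
}
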
@@ -18,8 +18,9 @@ package buildcontext

 import (
 	"errors"
-	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"strings"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 )

 // BuildContext unifies calls to download and unpack the build context.

@@ -17,14 +17,15 @@ limitations under the License.
 package buildcontext

 import (
+	"os"
+	"path/filepath"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-	"os"
-	"path/filepath"
 )

 // S3 unifies calls to download and unpack the build context.

@@ -20,7 +20,6 @@ import (
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
-	"github.com/sirupsen/logrus"
 )

 type ShellCommand struct {

@@ -29,13 +28,7 @@ type ShellCommand struct {

 // ExecuteCommand handles command processing similar to CMD and RUN,
 func (s *ShellCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
-	logrus.Info("cmd: SHELL")
-	var newShell []string
-
-	newShell = s.cmd.Shell
-
-	logrus.Infof("Replacing Shell in config with %v", newShell)
-	config.Shell = newShell
+	config.Shell = s.cmd.Shell
 	return nil
 }

@@ -27,57 +27,57 @@ import (

 var userTests = []struct {
 	user        string
-	expectedUid string
+	expectedUID string
 	shouldError bool
 }{
 	{
 		user:        "root",
-		expectedUid: "root",
+		expectedUID: "root",
 		shouldError: false,
 	},
 	{
 		user:        "0",
-		expectedUid: "0",
+		expectedUID: "0",
 		shouldError: false,
 	},
 	{
 		user:        "fakeUser",
-		expectedUid: "",
+		expectedUID: "",
 		shouldError: true,
 	},
 	{
 		user:        "root:root",
-		expectedUid: "root:root",
+		expectedUID: "root:root",
 		shouldError: false,
 	},
 	{
 		user:        "0:root",
-		expectedUid: "0:root",
+		expectedUID: "0:root",
 		shouldError: false,
 	},
 	{
 		user:        "root:0",
-		expectedUid: "root:0",
+		expectedUID: "root:0",
 		shouldError: false,
 	},
 	{
 		user:        "0:0",
-		expectedUid: "0:0",
+		expectedUID: "0:0",
 		shouldError: false,
 	},
 	{
 		user:        "root:fakeGroup",
-		expectedUid: "",
+		expectedUID: "",
 		shouldError: true,
 	},
 	{
 		user:        "$envuser",
-		expectedUid: "root",
+		expectedUID: "root",
 		shouldError: false,
 	},
 	{
 		user:        "root:$envgroup",
-		expectedUid: "root:root",
+		expectedUID: "root:root",
 		shouldError: false,
 	},
 }

@@ -97,6 +97,6 @@ func TestUpdateUser(t *testing.T) {
 		}
 		buildArgs := dockerfile.NewBuildArgs([]string{})
 		err := cmd.ExecuteCommand(cfg, buildArgs)
-		testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUid, cfg.User)
+		testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUID, cfg.User)
 	}
 }

@@ -19,16 +19,16 @@ package config
 // KanikoOptions are options that are set by command line arguments
 type KanikoOptions struct {
 	DockerfilePath string
-	Destinations   multiArg
 	SrcContext     string
 	SnapshotMode   string
 	Bucket         string
-	InsecurePush   bool
-	SkipTlsVerify  bool
-	BuildArgs      multiArg
 	TarPath        string
-	Target         string
+	Destinations   multiArg
+	BuildArgs      multiArg
+	InsecurePush   bool
+	SkipTLSVerify  bool
 	SingleSnapshot bool
 	Reproducible   bool
+	Target         string
 	NoPush         bool
 }

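Note: the reordering above groups the string, multiArg, and bool fields together, which is plausibly to satisfy the maligned linter enabled in the lint script earlier in this diff: ordering fields by decreasing alignment avoids padding bytes. A rough sketch of the effect (sizes assume a 64-bit platform; the types are hypothetical):

package main

import (
	"fmt"
	"unsafe"
)

// loose interleaves bools between larger fields, so the compiler
// inserts padding after each bool to realign the next field.
type loose struct {
	a bool   // 1 byte + 7 bytes padding
	s string // 16 bytes
	b bool   // 1 byte + 7 bytes padding
}

// packed puts the large field first and keeps the bools adjacent.
type packed struct {
	s string // 16 bytes
	a bool   // 1 byte
	b bool   // 1 byte + 6 bytes padding
}

func main() {
	fmt.Println(unsafe.Sizeof(loose{}))  // 32 on 64-bit
	fmt.Println(unsafe.Sizeof(packed{})) // 24 on 64-bit
}
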
@@ -21,8 +21,8 @@ import "github.com/moby/buildkit/frontend/dockerfile/instructions"
 // KanikoStage wraps a stage of the Dockerfile and provides extra information
 type KanikoStage struct {
 	instructions.Stage
-	BaseImageIndex         int
 	Final                  bool
 	BaseImageStoredLocally bool
+	BaseImageIndex         int
 	SaveStage              bool
 }

@@ -17,8 +17,9 @@ limitations under the License.
 package dockerfile

 import (
-	d "github.com/docker/docker/builder/dockerfile"
 	"strings"
+
+	d "github.com/docker/docker/builder/dockerfile"
 )

 type BuildArgs struct {

@@ -36,14 +37,14 @@ func NewBuildArgs(args []string) *BuildArgs {
 		}
 	}
 	return &BuildArgs{
-		*d.NewBuildArgs(argsFromOptions),
+		BuildArgs: *d.NewBuildArgs(argsFromOptions),
 	}
 }

 func (b *BuildArgs) Clone() *BuildArgs {
 	clone := b.BuildArgs.Clone()
 	return &BuildArgs{
-		*clone,
+		BuildArgs: *clone,
 	}
 }

@@ -54,8 +54,8 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
 		stage.Name = resolvedBaseName
 		kanikoStages = append(kanikoStages, config.KanikoStage{
 			Stage:                  stage,
-			BaseImageIndex:         baseImageIndex(opts, index, stages),
-			BaseImageStoredLocally: (baseImageIndex(opts, index, stages) != -1),
+			BaseImageIndex:         baseImageIndex(index, stages),
+			BaseImageStoredLocally: (baseImageIndex(index, stages) != -1),
 			SaveStage:              saveStage(index, stages),
 			Final:                  index == targetStage,
 		})

@@ -68,7 +68,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {

 // baseImageIndex returns the index of the stage the current stage is built off
 // returns -1 if the current stage isn't built off a previous stage
-func baseImageIndex(opts *config.KanikoOptions, currentStage int, stages []instructions.Stage) int {
+func baseImageIndex(currentStage int, stages []instructions.Stage) int {
 	for i, stage := range stages {
 		if i > currentStage {
 			break

@@ -20,7 +20,6 @@ import (
 	"strconv"
 	"testing"

-	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/testutil"
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
 )

@@ -184,7 +183,7 @@ func Test_baseImageIndex(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			actual := baseImageIndex(&config.KanikoOptions{}, test.currentStage, stages)
+			actual := baseImageIndex(test.currentStage, stages)
 			if actual != test.expected {
 				t.Fatalf("unexpected result, expected %d got %d", test.expected, actual)
 			}

@@ -43,10 +43,10 @@ import (

 // stageBuilder contains all fields necessary to build one stage of a Dockerfile
 type stageBuilder struct {
-	stage config.KanikoStage
-	image v1.Image
-	cf    *v1.ConfigFile
-	*snapshot.Snapshotter
+	stage           config.KanikoStage
+	image           v1.Image
+	cf              *v1.ConfigFile
+	snapshotter     *snapshot.Snapshotter
 	baseImageDigest string
 }

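Note: switching from an embedded *snapshot.Snapshotter to a named, unexported snapshotter field stops the Snapshotter's methods from being promoted onto stageBuilder's method set. A tiny sketch of the difference, using hypothetical types:

package main

type snapper struct{}

func (snapper) TakeSnapshot() {}

// withEmbed promotes the method: withEmbed{}.TakeSnapshot() compiles.
type withEmbed struct{ snapper }

// withField keeps the method off the outer type; callers must go
// through the field explicitly.
type withField struct{ s snapper }

func main() {
	withEmbed{}.TakeSnapshot()
	withField{}.s.TakeSnapshot()
}
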
@@ -67,7 +67,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
 	if err != nil {
 		return nil, err
 	}
-	l := snapshot.NewLayeredMap(hasher)
+	l := snapshot.NewLayeredMap(hasher, util.CacheHasher())
 	snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)

 	digest, err := sourceImage.Digest()

@@ -78,7 +78,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
 		stage:           stage,
 		image:           sourceImage,
 		cf:              imageConfig,
-		Snapshotter:     snapshotter,
+		snapshotter:     snapshotter,
 		baseImageDigest: digest.String(),
 	}, nil
 }

@@ -101,7 +101,7 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 		return err
 	}
 	// Take initial snapshot
-	if err := s.Snapshotter.Init(); err != nil {
+	if err := s.snapshotter.Init(); err != nil {
 		return err
 	}
 	args := dockerfile.NewBuildArgs(opts.BuildArgs)

@@ -114,6 +114,9 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 		if command == nil {
 			continue
 		}
+		if err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
+			return err
+		}
 		logrus.Info(command.String())
 		if err := command.ExecuteCommand(&s.cf.Config, args); err != nil {
 			return err

@@ -126,23 +129,23 @@ func (s *stageBuilder) build(opts *config.KanikoOptions) error {
 		// by previous commands.
 		if !s.stage.Final {
 			if finalCmd {
-				contents, err = s.Snapshotter.TakeSnapshotFS()
+				contents, err = s.snapshotter.TakeSnapshotFS()
 			}
 		} else {
 			// If we are in single snapshot mode, we only take a snapshot once, after all
 			// commands have completed.
 			if opts.SingleSnapshot {
 				if finalCmd {
-					contents, err = s.Snapshotter.TakeSnapshotFS()
+					contents, err = s.snapshotter.TakeSnapshotFS()
 				}
 			} else {
 				// Otherwise, in the final stage we take a snapshot at each command. If we know
 				// the files that were changed, we'll snapshot those explicitly, otherwise we'll
 				// check if anything in the filesystem changed.
 				if files != nil {
-					contents, err = s.Snapshotter.TakeSnapshot(files)
+					contents, err = s.snapshotter.TakeSnapshot(files)
 				} else {
-					contents, err = s.Snapshotter.TakeSnapshotFS()
+					contents, err = s.snapshotter.TakeSnapshotFS()
 				}
 			}
 		}

@@ -87,7 +87,7 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {

 	// Create a transport to set our user-agent.
 	tr := http.DefaultTransport
-	if opts.SkipTlsVerify {
+	if opts.SkipTLSVerify {
 		tr.(*http.Transport).TLSClientConfig = &tls.Config{
 			InsecureSkipVerify: true,
 		}

@@ -17,20 +17,28 @@ limitations under the License.
 package snapshot

 import (
+	"bytes"
+	"encoding/json"
 	"fmt"
 	"path/filepath"
 	"strings"

 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 )

 type LayeredMap struct {
 	layers    []map[string]string
 	whiteouts []map[string]string
+	added     []map[string]string
 	hasher    func(string) (string, error)
+	// cacheHasher doesn't include mtime in it's hash so that filesystem cache keys are stable
+	cacheHasher func(string) (string, error)
 }

-func NewLayeredMap(h func(string) (string, error)) *LayeredMap {
+func NewLayeredMap(h func(string) (string, error), c func(string) (string, error)) *LayeredMap {
 	l := LayeredMap{
-		hasher: h,
+		hasher:      h,
+		cacheHasher: c,
 	}
 	l.layers = []map[string]string{}
 	return &l

@@ -39,8 +47,18 @@ func NewLayeredMap(h func(string) (string, error)) *LayeredMap {
 func (l *LayeredMap) Snapshot() {
 	l.whiteouts = append(l.whiteouts, map[string]string{})
 	l.layers = append(l.layers, map[string]string{})
+	l.added = append(l.added, map[string]string{})
 }

+// Key returns a hash for added files
+func (l *LayeredMap) Key() (string, error) {
+	c := bytes.NewBuffer([]byte{})
+	enc := json.NewEncoder(c)
+	enc.Encode(l.added)
+	return util.SHA256(c)
+}
+
 // GetFlattenedPathsForWhiteOut returns all paths in the current FS
 func (l *LayeredMap) GetFlattenedPathsForWhiteOut() map[string]struct{} {
 	paths := map[string]struct{}{}
 	for _, l := range l.layers {

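Note: Key() is deterministic because encoding/json writes map keys in sorted order, so two added maps with the same contents serialize identically regardless of insertion order — the property Test_CacheKey later in this diff exercises. A standalone sketch of the same computation, using only the standard library:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// key mirrors LayeredMap.Key: JSON-encode the slice of maps, then hash it.
func key(added []map[string]string) (string, error) {
	var c bytes.Buffer
	if err := json.NewEncoder(&c).Encode(added); err != nil {
		return "", err
	}
	sum := sha256.Sum256(c.Bytes())
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	k1, _ := key([]map[string]string{{"a": "apple", "b": "bat"}})
	k2, _ := key([]map[string]string{{"b": "bat", "a": "apple"}})
	fmt.Println(k1 == k2) // true: same contents give the same key
}
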
@@ -85,11 +103,18 @@ func (l *LayeredMap) MaybeAddWhiteout(s string) (bool, error) {

 // Add will add the specified file s to the layered map.
 func (l *LayeredMap) Add(s string) error {
+	// Use hash function and add to layers
 	newV, err := l.hasher(s)
 	if err != nil {
-		return fmt.Errorf("Error creating hash for %s: %s", s, err)
+		return fmt.Errorf("Error creating hash for %s: %v", s, err)
 	}
 	l.layers[len(l.layers)-1][s] = newV
+	// Use cache hash function and add to added
+	cacheV, err := l.cacheHasher(s)
+	if err != nil {
+		return fmt.Errorf("Error creating cache hash for %s: %v", s, err)
+	}
+	l.added[len(l.added)-1][s] = cacheV
 	return nil
 }

@@ -0,0 +1,79 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package snapshot
+
+import (
+	"testing"
+)
+
+func Test_CacheKey(t *testing.T) {
+	tests := []struct {
+		name  string
+		map1  map[string]string
+		map2  map[string]string
+		equal bool
+	}{
+		{
+			name: "maps are the same",
+			map1: map[string]string{
+				"a": "apple",
+				"b": "bat",
+				"c": "cat",
+				"d": "dog",
+				"e": "egg",
+			},
+			map2: map[string]string{
+				"c": "cat",
+				"d": "dog",
+				"b": "bat",
+				"a": "apple",
+				"e": "egg",
+			},
+			equal: true,
+		},
+		{
+			name: "maps are different",
+			map1: map[string]string{
+				"a": "apple",
+				"b": "bat",
+				"c": "cat",
+			},
+			map2: map[string]string{
+				"c": "",
+				"b": "bat",
+				"a": "apple",
+			},
+			equal: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			lm1 := LayeredMap{added: []map[string]string{test.map1}}
+			lm2 := LayeredMap{added: []map[string]string{test.map2}}
+			k1, err := lm1.Key()
+			if err != nil {
+				t.Fatalf("error getting key for map 1: %v", err)
+			}
+			k2, err := lm2.Key()
+			if err != nil {
+				t.Fatalf("error getting key for map 2: %v", err)
+			}
+			if test.equal != (k1 == k2) {
+				t.Fatalf("unexpected result: \nExpected\n%s\nActual\n%s\n", k1, k2)
+			}
+		})
+	}
+}

@@ -49,6 +49,11 @@ func (s *Snapshotter) Init() error {
 	return nil
 }

+// Key returns a string based on the current state of the file system
+func (s *Snapshotter) Key() (string, error) {
+	return s.l.Key()
+}
+
 // TakeSnapshot takes a snapshot of the specified files, avoiding directories in the whitelist, and creates
 // a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
 func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) {

@@ -102,7 +107,8 @@ func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
 		logrus.Info("No files changed in this command, skipping snapshotting.")
 		return false, nil
 	}
-	logrus.Infof("Taking snapshot of files %v...", files)
+	logrus.Info("Taking snapshot of files...")
+	logrus.Debugf("Taking snapshot of files %v", files)
 	snapshottedFiles := make(map[string]bool)
 	filesAdded := false

@@ -198,7 +198,7 @@ func setUpTestDir() (string, *Snapshotter, error) {
 	}

 	// Take the initial snapshot
-	l := NewLayeredMap(util.Hasher())
+	l := NewLayeredMap(util.Hasher(), util.CacheHasher())
 	snapshotter := NewSnapshotter(l, testDir)
 	if err := snapshotter.Init(); err != nil {
 		return testDir, nil, errors.Wrap(err, "initializing snapshotter")

@@ -17,8 +17,9 @@ limitations under the License.
 package util

 import (
-	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"strings"
+
+	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 )

 func GetBucketAndItem(context string) (string, string) {

@@ -228,10 +228,7 @@ func IsSrcRemoteFileURL(rawurl string) bool {
 		return false
 	}
 	_, err = http.Get(rawurl)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 func UpdateConfigEnv(newEnvs []instructions.KeyValuePair, config *v1.Config, replacementEnvs []string) error {

@@ -23,7 +23,7 @@ import (
 	"github.com/GoogleContainerTools/kaniko/testutil"
 )

-var testUrl = "https://github.com/GoogleContainerTools/runtimes-common/blob/master/LICENSE"
+var testURL = "https://github.com/GoogleContainerTools/runtimes-common/blob/master/LICENSE"

 var testEnvReplacement = []struct {
 	path string

@@ -220,7 +220,7 @@ var matchSourcesTests = []struct {
 		srcs: []string{
 			"pkg/*",
 			"/root/dir?",
-			testUrl,
+			testURL,
 		},
 		files: []string{
 			"pkg/a",

@@ -234,7 +234,7 @@ var matchSourcesTests = []struct {
 			"/root/dir1",
 			"pkg/a",
 			"pkg/b",
-			testUrl,
+			testURL,
 		},
 	},
 }

@@ -344,11 +344,11 @@ var isSrcValidTests = []struct {
 	},
 	{
 		srcsAndDest: []string{
-			testUrl,
+			testURL,
 			"dest",
 		},
 		resolvedSources: []string{
-			testUrl,
+			testURL,
 		},
 		shouldErr: false,
 	},

@@ -369,13 +369,13 @@ var testResolveSources = []struct {
 		srcsAndDest: []string{
 			"context/foo",
 			"context/b*",
-			testUrl,
+			testURL,
 			"dest/",
 		},
 		expectedList: []string{
 			"context/foo",
 			"context/bar",
-			testUrl,
+			testURL,
 		},
 	},
 }

@@ -40,6 +40,9 @@ var whitelist = []string{
 	// which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist
 	// in the image with no way to tell if it came from the base image or not.
 	"/var/run",
+	// similarly, we whitelist /etc/mtab, since there is no way to know if the file was mounted or came
+	// from the base image
+	"/etc/mtab",
 }
 var volumeWhitelist = []string{}

@@ -109,7 +112,7 @@ func GetFSFromImage(root string, img v1.Image) error {
 // DeleteFilesystem deletes the extracted image file system
 func DeleteFilesystem() error {
 	logrus.Info("Deleting filesystem...")
-	err := filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, err error) error {
+	return filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, _ error) error {
 		whitelisted, err := CheckWhitelist(path)
 		if err != nil {
 			return err

@@ -123,7 +126,6 @@ func DeleteFilesystem() error {
 		}
 		return os.RemoveAll(path)
 	})
-	return err
 }

 // ChildDirInWhitelist returns true if there is a child file or directory of the path in the whitelist

@@ -195,7 +197,6 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
 			return err
 		}
 		currFile.Close()
-
 	case tar.TypeDir:
 		logrus.Debugf("creating dir %s", path)
 		if err := os.MkdirAll(path, mode); err != nil {

@@ -310,6 +311,9 @@ func RelativeFiles(fp string, root string) ([]string, error) {
 	fullPath := filepath.Join(root, fp)
 	logrus.Debugf("Getting files and contents at root %s", fullPath)
 	err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
 		whitelisted, err := CheckWhitelist(path)
 		if err != nil {
 			return err

@@ -50,7 +50,7 @@ func Test_fileSystemWhitelist(t *testing.T) {
 	}

 	actualWhitelist, err := fileSystemWhitelist(path)
-	expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run"}
+	expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run", "/etc/mtab"}
 	sort.Strings(actualWhitelist)
 	sort.Strings(expectedWhitelist)
 	testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist)

@@ -343,8 +343,8 @@ func filesAreHardlinks(first, second string) checker {
 		if err != nil {
 			t.Fatalf("error getting file %s", second)
 		}
-		stat1 := getSyscallStat_t(fi1)
-		stat2 := getSyscallStat_t(fi2)
+		stat1 := getSyscallStatT(fi1)
+		stat2 := getSyscallStatT(fi2)
 		if stat1.Ino != stat2.Ino {
 			t.Errorf("%s and %s aren't hardlinks as they dont' have the same inode", first, second)
 		}

@@ -25,6 +25,7 @@ import (
 	"github.com/google/go-containerregistry/pkg/name"
 	"github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
 	"github.com/google/go-containerregistry/pkg/v1/remote"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
 	"github.com/sirupsen/logrus"

@@ -61,7 +62,7 @@ func RetrieveSourceImage(stage config.KanikoStage, buildArgs []string) (v1.Image
 }

 // RetrieveConfigFile returns the config file for an image
-func RetrieveConfigFile(sourceImage v1.Image) (*v1.ConfigFile, error) {
+func RetrieveConfigFile(sourceImage partial.WithConfigFile) (*v1.ConfigFile, error) {
 	imageConfig, err := sourceImage.ConfigFile()
 	if err != nil {
 		return nil, err

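Note: narrowing the parameter from v1.Image to partial.WithConfigFile means RetrieveConfigFile demands only the one method it actually calls (ConfigFile(), as the body above shows), so callers and tests can pass a minimal stub instead of a full image. A sketch with a hypothetical stub type, assuming the interface is satisfied by ConfigFile() alone:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1"
)

// stubImage provides only ConfigFile(), which is all the narrowed
// parameter type requires here.
type stubImage struct{ cf *v1.ConfigFile }

func (s stubImage) ConfigFile() (*v1.ConfigFile, error) { return s.cf, nil }

func main() {
	cfg, _ := stubImage{cf: &v1.ConfigFile{}}.ConfigFile()
	fmt.Printf("%T\n", cfg)
}
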
@@ -118,7 +118,7 @@ func (t *Tar) Whiteout(p string) error {
 func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) {
 	hardlink := false
 	linkDst := ""
-	stat := getSyscallStat_t(i)
+	stat := getSyscallStatT(i)
 	if stat != nil {
 		nlinks := stat.Nlink
 		if nlinks > 1 {

@@ -135,7 +135,7 @@ func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) {
 	return hardlink, linkDst
 }

-func getSyscallStat_t(i os.FileInfo) *syscall.Stat_t {
+func getSyscallStatT(i os.FileInfo) *syscall.Stat_t {
 	if sys := i.Sys(); sys != nil {
 		if stat, ok := sys.(*syscall.Stat_t); ok {
 			return stat

@@ -195,10 +195,10 @@ func fileIsCompressedTar(src string) (bool, archive.Compression) {

 func fileIsUncompressedTar(src string) bool {
 	r, err := os.Open(src)
-	defer r.Close()
 	if err != nil {
 		return false
 	}
+	defer r.Close()
 	fi, err := os.Lstat(src)
 	if err != nil {
 		return false

@@ -210,13 +210,8 @@ func fileIsUncompressedTar(src string) bool {
 	if tr == nil {
 		return false
 	}
-	for {
-		_, err := tr.Next()
-		if err != nil {
-			return false
-		}
-		return true
-	}
+	_, err = tr.Next()
+	return err == nil
 }

 // UnpackCompressedTar unpacks the compressed tar at path to dir

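Note: the removed loop body ran at most once, so reading a single header and returning err == nil is equivalent — a file is treated as an uncompressed tar exactly when its first header parses. A standalone sketch of the probe, with a hypothetical isTar helper using only archive/tar:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// isTar reports whether the file's first tar header can be read.
func isTar(path string) bool {
	f, err := os.Open(path)
	if err != nil {
		return false
	}
	defer f.Close()
	_, err = tar.NewReader(f).Next()
	return err == nil
}

func main() {
	fmt.Println(isTar("/tmp/example.tar")) // hypothetical path
}
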
@@ -18,6 +18,7 @@ package util

 import (
 	"crypto/md5"
+	"crypto/sha256"
 	"encoding/hex"
 	"io"
 	"os"

@@ -72,6 +73,36 @@ func Hasher() func(string) (string, error) {
 	return hasher
 }

+// CacheHasher takes into account everything the regular hasher does except for mtime
+func CacheHasher() func(string) (string, error) {
+	hasher := func(p string) (string, error) {
+		h := md5.New()
+		fi, err := os.Lstat(p)
+		if err != nil {
+			return "", err
+		}
+		h.Write([]byte(fi.Mode().String()))
+
+		h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36)))
+		h.Write([]byte(","))
+		h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36)))
+
+		if fi.Mode().IsRegular() {
+			f, err := os.Open(p)
+			if err != nil {
+				return "", err
+			}
+			defer f.Close()
+			if _, err := io.Copy(h, f); err != nil {
+				return "", err
+			}
+		}
+
+		return hex.EncodeToString(h.Sum(nil)), nil
+	}
+	return hasher
+}
+
 // MtimeHasher returns a hash function, which only looks at mtime to determine if a file has changed.
 // Note that the mtime can lag, so it's possible that a file will have changed but the mtime may look the same.
 func MtimeHasher() func(string) (string, error) {

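Note: CacheHasher hashes the file mode, owner (uid/gid in base 36), and, for regular files, the contents — but deliberately not the mtime, so re-creating identical files does not invalidate cache keys. A quick check of that property (assumes a Linux environment, since the hasher reads *syscall.Stat_t, and uses kaniko's exported util.CacheHasher from above):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	f, err := os.CreateTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString("hello")
	f.Close()

	hash := util.CacheHasher()
	before, _ := hash(f.Name())
	// Bump only the mtime; mode, owner, and contents are untouched.
	os.Chtimes(f.Name(), time.Now(), time.Now().Add(time.Hour))
	after, _ := hash(f.Name())
	fmt.Println(before == after) // true: mtime is excluded from the cache hash
}
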
@@ -86,3 +117,13 @@ func MtimeHasher() func(string) (string, error) {
 	}
 	return hasher
 }
+
+// SHA256 returns the shasum of the contents of r
+func SHA256(r io.Reader) (string, error) {
+	hasher := sha256.New()
+	_, err := io.Copy(hasher, r)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), nil
+}