Replace gometalinter with GolangCI-Lint

gometalinter is broken @ HEAD, and I looked into why that was. During
that process, I remembered that we took the linting scripts from
skaffold, and found that in skaffold gometalinter was replaced with
GolangCI-Lint:

https://github.com/GoogleContainerTools/skaffold/pull/619

The change made linting in skaffold faster, so I figured instead of
fixing gometalinter it made more sense to remove it and replace it with
GolangCI-Lint for kaniko as well.
This commit is contained in:
Priya Wadhwa 2018-09-11 10:31:20 -07:00
parent 06defa6552
commit 99ab68e7f4
19 changed files with 466 additions and 100 deletions

View File

@ -86,7 +86,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) {
RootCmd.PersistentFlags().StringVarP(&opts.SnapshotMode, "snapshotMode", "", "full", "Change the file attributes inspected during snapshotting")
RootCmd.PersistentFlags().VarP(&opts.BuildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
RootCmd.PersistentFlags().BoolVarP(&opts.InsecurePush, "insecure", "", false, "Push to insecure registry using plain HTTP")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTlsVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().BoolVarP(&opts.SkipTLSVerify, "skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().StringVarP(&opts.TarPath, "tarPath", "", "", "Path to save the image in as a tarball instead of pushing")
RootCmd.PersistentFlags().BoolVarP(&opts.SingleSnapshot, "single-snapshot", "", false, "Take a single snapshot at the end of the build.")
RootCmd.PersistentFlags().BoolVarP(&opts.Reproducible, "reproducible", "", false, "Strip timestamps out of the image to make it reproducible")
@ -145,7 +145,7 @@ func resolveSourceContext() error {
opts.SrcContext = opts.Bucket
}
}
// if no prefix use Google Cloud Storage as default for backwards compability
// if no prefix use Google Cloud Storage as default for backwards compatibility
contextExecutor, err := buildcontext.GetBuildContext(opts.SrcContext)
if err != nil {
return err

View File

@ -18,12 +18,14 @@ from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
SKIPPED_DIRS = ["Godeps", "third_party", ".git", "vendor", "examples", "testdata"]
SKIPPED_FILES = ["install_golint.sh"]
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
@ -71,7 +73,7 @@ def file_passes(filename, refs, regexs):
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
elif extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
@ -105,17 +107,11 @@ def file_passes(filename, refs, regexs):
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '.git', "vendor", "differs/testDirs/pipTests"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
for i, pathname in enumerate(files):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
newfiles.append(os.path.join(args.rootdir, pathname))
return newfiles
def get_files(extensions):
@ -124,17 +120,14 @@ def get_files(extensions):
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
for d in SKIPPED_DIRS:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
if name not in SKIPPED_FILES:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []

View File

@ -1,17 +0,0 @@
{
"Vendor": true,
"EnableGC": true,
"Debug": false,
"Sort": ["linter", "severity", "path"],
"Enable": [
"deadcode",
"gofmt",
"golint",
"gosimple",
"ineffassign",
"vet"
],
"LineLength": 200
}

388
hack/install_golint.sh Executable file
View File

@ -0,0 +1,388 @@
#!/bin/sh
set -e
# Code generated by godownloader on 2018-06-05T12:04:55Z. DO NOT EDIT.
#
# usage <progname>
# Print invocation help (heredoc below) to stdout and exit with status 2.
usage() {
this=$1
cat <<EOF
$this: download go binaries for golangci/golangci-lint
Usage: $this [-b] bindir [-d] [tag]
-b sets bindir or installation directory, Defaults to ./bin
-d turns on debug logging
[tag] is a tag from
https://github.com/golangci/golangci-lint/releases
If tag is missing, then the latest will be used.
Generated by godownloader
https://github.com/goreleaser/godownloader
EOF
exit 2
}
# parse_args [args...]
# Parse command-line flags into the globals BINDIR and TAG:
#   -b DIR   installation directory (default ./bin, or $BINDIR from env)
#   -d       enable debug logging (priority 10)
#   -h / -?  print usage and exit
# The first remaining positional argument becomes TAG (may be empty).
parse_args() {
#BINDIR is ./bin unless set by ENV
# over-ridden by flag below
BINDIR=${BINDIR:-./bin}
while getopts "b:dh?" arg; do
case "$arg" in
b) BINDIR="$OPTARG" ;;
d) log_set_priority 10 ;;
h | \?) usage "$0" ;;
esac
done
shift $((OPTIND - 1))
TAG=$1
}
# this function wraps all the destructive operations
# if a curl|bash cuts off the end of the script due to
# network, either nothing will happen or will syntax error
# out preventing half-done work
#
# execute
# Download the release tarball and its checksum file into a scratch
# directory, verify the SHA-256, unpack, and install the binary (or
# .exe on Windows) into $BINDIR. Relies on the globals computed by the
# driver at the bottom of the script (TARBALL, TARBALL_URL, CHECKSUM,
# CHECKSUM_URL, NAME, OS, BINDIR).
execute() {
tmpdir=$(mktmpdir)
log_debug "downloading files into ${tmpdir}"
http_download "${tmpdir}/${TARBALL}" "${TARBALL_URL}"
http_download "${tmpdir}/${CHECKSUM}" "${CHECKSUM_URL}"
hash_sha256_verify "${tmpdir}/${TARBALL}" "${tmpdir}/${CHECKSUM}"
srcdir="${tmpdir}/${NAME}"
rm -rf "${srcdir}"
(cd "${tmpdir}" && untar "${TARBALL}")
install -d "${BINDIR}"
for binexe in "golangci-lint" ; do
if [ "$OS" = "windows" ]; then
binexe="${binexe}.exe"
fi
install "${srcdir}/${binexe}" "${BINDIR}/"
log_info "installed ${BINDIR}/${binexe}"
done
}
# is_supported_platform <os/arch>
# Succeed (exit status 0) only for the OS/arch pairs that
# golangci-lint publishes release binaries for.
is_supported_platform() {
  platform=$1
  case "$platform" in
    darwin/amd64 | darwin/386 | windows/amd64 | windows/386 | linux/amd64 | linux/386)
      return 0
      ;;
    *)
      return 1
      ;;
  esac
}
# check_platform
# Abort (exit 1) with a log_crit message unless $PLATFORM ("$OS/$ARCH",
# set by the driver below) is one of the supported combinations.
check_platform() {
if is_supported_platform "$PLATFORM"; then
# optional logging goes here
true
else
log_crit "platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https://github.com/${PREFIX}/issues/new"
exit 1
fi
}
# tag_to_version
# Resolve $TAG (or the latest release when TAG is empty) to a concrete
# release tag via the GitHub releases API, then set:
#   TAG     - the real tag name (e.g. "v1.9.3")
#   VERSION - TAG with any leading "v" stripped (e.g. "1.9.3")
# Exits 1 if the tag cannot be resolved.
tag_to_version() {
if [ -z "${TAG}" ]; then
log_info "checking GitHub for latest tag"
else
log_info "checking GitHub for tag '${TAG}'"
fi
REALTAG=$(github_release "$OWNER/$REPO" "${TAG}") && true
if test -z "$REALTAG"; then
log_crit "unable to find '${TAG}' - use 'latest' or see https://github.com/${PREFIX}/releases for details"
exit 1
fi
# if version starts with 'v', remove it
TAG="$REALTAG"
VERSION=${TAG#v}
}
# adjust_format
# Switch the archive FORMAT from the tar.gz default to zip on Windows.
# BUG FIX: the generated code matched ${ARCH} against "windows", but
# "windows" is an OS (GOOS) value — ARCH only ever holds GOARCH values
# like amd64/386 — so the zip branch could never fire and Windows
# downloads requested a nonexistent .tar.gz asset. Match ${OS} instead.
adjust_format() {
  # change format (tar.gz or zip) based on OS
  case ${OS} in
    windows) FORMAT=zip ;;
  esac
  true
}
# adjust_os
# Hook for OS-specific archive-name tweaks; intentionally a no-op for
# golangci-lint (archive names use the plain GOOS value).
adjust_os() {
# adjust archive name based on OS
true
}
# adjust_arch
# Hook for ARCH-specific archive-name tweaks; intentionally a no-op for
# golangci-lint (archive names use the plain GOARCH value).
adjust_arch() {
# adjust archive name based on ARCH
true
}
cat /dev/null <<EOF
------------------------------------------------------------------------
https://github.com/client9/shlib - portable posix shell functions
Public domain - http://unlicense.org
https://github.com/client9/shlib/blob/master/LICENSE.md
but credit (and pull requests) appreciated.
------------------------------------------------------------------------
EOF
# is_command <name>
# Succeed if <name> resolves to a runnable command on PATH.
is_command() {
command -v "$1" >/dev/null
}
# echoerr [args...]
# Echo all arguments to stderr.
echoerr() {
echo "$@" 1>&2
}
# log_prefix
# Prefix prepended by the log_* helpers. NOTE: redefined near the
# bottom of the script to print the repo slug instead of $0.
log_prefix() {
echo "$0"
}
# Current log threshold, syslog-style (6 = info). Messages with a
# numerically higher priority value than this are suppressed.
_logp=6
# log_set_priority <n>
# Set the logging threshold (e.g. 10 enables debug output).
log_set_priority() {
_logp="$1"
}
# log_priority [n]
# With no argument, print the current threshold. Otherwise succeed
# when priority <n> is at or below the threshold (i.e. should be shown).
log_priority() {
if test -z "$1"; then
echo "$_logp"
return
fi
[ "$1" -le "$_logp" ]
}
# log_tag <priority>
# Map a syslog-style numeric priority to its conventional name;
# any unrecognized value is echoed back unchanged.
log_tag() {
  level=$1
  case "$level" in
    0) echo "emerg" ;;
    1) echo "alert" ;;
    2) echo "crit" ;;
    3) echo "err" ;;
    4) echo "warning" ;;
    5) echo "notice" ;;
    6) echo "info" ;;
    7) echo "debug" ;;
    *) echo "$level" ;;
  esac
}
# log_debug [args...]
# Emit a debug-level (7) message to stderr if the threshold allows it.
log_debug() {
log_priority 7 || return 0
echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
}
# log_info [args...]
# Emit an info-level (6) message to stderr if the threshold allows it.
log_info() {
log_priority 6 || return 0
echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
}
# log_err [args...]
# Emit an err-level (3) message to stderr if the threshold allows it.
log_err() {
log_priority 3 || return 0
echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
}
# log_crit [args...]
# Emit a crit-level (2) message to stderr if the threshold allows it.
log_crit() {
log_priority 2 || return 0
echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
}
# uname_os
# Report the host OS as a lowercase GOOS-style string.
# BUG FIX: Windows shells report version-suffixed names such as
# "MSYS_NT-10.0", "MINGW64_NT-10.0" or "CYGWIN_NT-10.0", so the
# original exact-match pattern "msys_nt)" never matched real uname
# output and Windows hosts fell through unconverted. Match by prefix
# and cover the mingw/cygwin variants as well.
uname_os() {
  os=$(uname -s | tr '[:upper:]' '[:lower:]')
  case "$os" in
    msys_nt*) os="windows" ;;
    mingw*) os="windows" ;;
    cygwin_nt*) os="windows" ;;
  esac
  echo "$os"
}
# uname_arch
# Report the host CPU architecture as a GOARCH-style string
# (amd64, 386, arm64, armv5/6/7, ...); unknown values pass through.
uname_arch() {
arch=$(uname -m)
case $arch in
x86_64) arch="amd64" ;;
x86) arch="386" ;;
i686) arch="386" ;;
i386) arch="386" ;;
aarch64) arch="arm64" ;;
armv5*) arch="armv5" ;;
armv6*) arch="armv6" ;;
armv7*) arch="armv7" ;;
esac
echo ${arch}
}
# uname_os_check
# Sanity check: verify uname_os produced a known GOOS value; on
# failure log a bug-report pointer and return 1.
uname_os_check() {
os=$(uname_os)
case "$os" in
darwin) return 0 ;;
dragonfly) return 0 ;;
freebsd) return 0 ;;
linux) return 0 ;;
android) return 0 ;;
nacl) return 0 ;;
netbsd) return 0 ;;
openbsd) return 0 ;;
plan9) return 0 ;;
solaris) return 0 ;;
windows) return 0 ;;
esac
log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
return 1
}
# uname_arch_check
# Sanity check: verify uname_arch produced a known GOARCH value; on
# failure log a bug-report pointer and return 1.
uname_arch_check() {
arch=$(uname_arch)
case "$arch" in
386) return 0 ;;
amd64) return 0 ;;
arm64) return 0 ;;
armv5) return 0 ;;
armv6) return 0 ;;
armv7) return 0 ;;
ppc64) return 0 ;;
ppc64le) return 0 ;;
mips) return 0 ;;
mipsle) return 0 ;;
mips64) return 0 ;;
mips64le) return 0 ;;
s390x) return 0 ;;
amd64p32) return 0 ;;
esac
log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
return 1
}
# untar <archive>
# Extract a .tar.gz/.tgz, .tar or .zip archive into the current
# working directory; log an error and return 1 for anything else.
untar() {
  archive=$1
  case "${archive}" in
    *.tar.gz | *.tgz)
      tar -xzf "${archive}"
      ;;
    *.tar)
      tar -xf "${archive}"
      ;;
    *.zip)
      unzip "${archive}"
      ;;
    *)
      log_err "untar unknown archive format for ${archive}"
      return 1
      ;;
  esac
}
# mktmpdir
# Create and print a fresh, private scratch directory for downloads.
# BUG FIX: the generated code reused $TMPDIR verbatim whenever it was
# already set in the environment (e.g. /tmp, or macOS's per-user temp
# dir), so downloads from concurrent runs landed in one shared
# directory and execute()'s "rm -rf" operated on shared space. Always
# allocate a unique directory; mktemp(1) itself honors TMPDIR when
# choosing where to place it.
mktmpdir() {
  tmpd="$(mktemp -d)"
  echo "${tmpd}"
}
# http_download_curl <local_file> <url> [header]
# Fetch <url> into <local_file> with curl (following redirects),
# optionally sending one extra request header. Returns 1 unless the
# final HTTP status is exactly 200.
http_download_curl() {
local_file=$1
source_url=$2
header=$3
if [ -z "$header" ]; then
code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
else
code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
fi
if [ "$code" != "200" ]; then
log_debug "http_download_curl received HTTP status $code"
return 1
fi
return 0
}
# http_download_wget <local_file> <url> [header]
# wget fallback for http_download; propagates wget's exit status.
http_download_wget() {
local_file=$1
source_url=$2
header=$3
if [ -z "$header" ]; then
wget -q -O "$local_file" "$source_url"
else
wget -q --header "$header" -O "$local_file" "$source_url"
fi
}
# http_download <local_file> <url> [header]
# Download with curl if available, else wget; fail if neither exists.
http_download() {
log_debug "http_download $2"
if is_command curl; then
http_download_curl "$@"
return
elif is_command wget; then
http_download_wget "$@"
return
fi
log_crit "http_download unable to find wget or curl"
return 1
}
# http_copy <url> [header]
# Download <url> to a temp file and print its contents on stdout;
# returns 1 on download failure.
http_copy() {
tmp=$(mktemp)
http_download "${tmp}" "$1" "$2" || return 1
body=$(cat "$tmp")
rm -f "${tmp}"
echo "$body"
}
# github_release <owner/repo> [version]
# Resolve a release tag (or "latest" when version is empty) via the
# GitHub releases redirect endpoint and print the real tag_name.
# NOTE(review): the sed-based JSON scrape assumes "tag_name" appears
# exactly once in the response — fragile if GitHub changes the payload.
github_release() {
owner_repo=$1
version=$2
test -z "$version" && version="latest"
giturl="https://github.com/${owner_repo}/releases/${version}"
json=$(http_copy "$giturl" "Accept:application/json")
test -z "$json" && return 1
version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
test -z "$version" && return 1
echo "$version"
}
# hash_sha256 [file]
# Print the SHA-256 hex digest of <file> (or stdin when omitted),
# using the first available of gsha256sum/sha256sum/shasum/openssl.
# Returns 1 if no hashing tool exists or the tool fails.
hash_sha256() {
  TARGET=${1:-/dev/stdin}
  if is_command gsha256sum; then
    hash=$(gsha256sum "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command sha256sum; then
    hash=$(sha256sum "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command shasum; then
    hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
    echo "$hash" | cut -d ' ' -f 1
  elif is_command openssl; then
    # BUG FIX: the generated code ran "openssl -dst openssl dgst -sha256"
    # (not a valid openssl invocation) and then "cut -d ' ' -f a" (not a
    # valid field number), so the openssl fallback could never produce a
    # digest. openssl prints "SHA256(<file>)= <hex>", so the digest is
    # field 2.
    hash=$(openssl dgst -sha256 "$TARGET") || return 1
    echo "$hash" | cut -d ' ' -f 2
  else
    log_crit "hash_sha256 unable to find command to compute sha-256 hash"
    return 1
  fi
}
# hash_sha256_verify <target_file> <checksums_file>
# Look up <target_file>'s basename in a sha256sum-style checksums file
# and compare the recorded digest against the computed one. Returns 1
# when arguments are missing, the entry is absent, or digests differ.
hash_sha256_verify() {
TARGET=$1
checksums=$2
if [ -z "$checksums" ]; then
log_err "hash_sha256_verify checksum file not specified in arg2"
return 1
fi
BASENAME=${TARGET##*/}
want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
if [ -z "$want" ]; then
log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
return 1
fi
got=$(hash_sha256 "$TARGET")
if [ "$want" != "$got" ]; then
log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
return 1
fi
}
cat /dev/null <<EOF
------------------------------------------------------------------------
End of functions from https://github.com/client9/shlib
------------------------------------------------------------------------
EOF
# ---- main driver ----
# Project constants used to build download URLs and asset names.
PROJECT_NAME="golangci-lint"
OWNER=golangci
REPO="golangci-lint"
BINARY=golangci-lint
FORMAT=tar.gz
# Detect the host platform.
OS=$(uname_os)
ARCH=$(uname_arch)
PREFIX="$OWNER/$REPO"
# use in logging routines
# (intentionally shadows the earlier $0-based definition so log output
# is tagged with the repo slug)
log_prefix() {
echo "$PREFIX"
}
PLATFORM="${OS}/${ARCH}"
GITHUB_DOWNLOAD=https://github.com/${OWNER}/${REPO}/releases/download
# Validate detection, parse flags, resolve the release, then pick the
# archive format/name for this platform.
uname_os_check "$OS"
uname_arch_check "$ARCH"
parse_args "$@"
check_platform
tag_to_version
adjust_format
adjust_os
adjust_arch
log_info "found version: ${VERSION} for ${TAG}/${OS}/${ARCH}"
# Compose the asset and checksum URLs for the resolved release.
NAME=${BINARY}-${VERSION}-${OS}-${ARCH}
TARBALL=${NAME}.${FORMAT}
TARBALL_URL=${GITHUB_DOWNLOAD}/${TAG}/${TARBALL}
CHECKSUM=${PROJECT_NAME}-${VERSION}-checksums.txt
CHECKSUM_URL=${GITHUB_DOWNLOAD}/${TAG}/${CHECKSUM}
# Do the download/verify/install.
execute

View File

@ -14,22 +14,24 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e -o pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
install_gometalinter() {
echo "Installing gometalinter.v2"
go get -u gopkg.in/alecthomas/gometalinter.v2
gometalinter.v2 --install
}
if ! [ -x "$(command -v gometalinter.v2)" ]; then
install_gometalinter
if ! [ -x "$(command -v golangci-lint)" ]; then
echo "Installing GolangCI-Lint"
${DIR}/install_golint.sh -b $GOPATH/bin v1.9.3
fi
gometalinter.v2 \
${GOMETALINTER_OPTS:--deadine 5m} \
--config $SCRIPTDIR/gometalinter.json ./...
golangci-lint run \
--no-config \
-E goconst \
-E goimports \
-E golint \
-E interfacer \
-E maligned \
-E misspell \
-E unconvert \
-E unparam \
-D errcheck \
-D gas

View File

@ -71,7 +71,6 @@ func initGCPConfig() *gcpConfig {
}
const (
ubuntuImage = "ubuntu"
daemonPrefix = "daemon://"
dockerfilesPath = "dockerfiles"
emptyContainerDiff = `[
@ -240,30 +239,26 @@ func TestLayers(t *testing.T) {
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
pullCmd := exec.Command("docker", "pull", kanikoImage)
RunCommand(pullCmd, t)
if err := checkLayers(t, dockerImage, kanikoImage, offset[dockerfile]); err != nil {
t.Error(err)
t.Fail()
}
checkLayers(t, dockerImage, kanikoImage, offset[dockerfile])
})
}
}
func checkLayers(t *testing.T, image1, image2 string, offset int) error {
func checkLayers(t *testing.T, image1, image2 string, offset int) {
img1, err := getImageDetails(image1)
if err != nil {
return fmt.Errorf("Couldn't get details from image reference for (%s): %s", image1, err)
t.Fatalf("Couldn't get details from image reference for (%s): %s", image1, err)
}
img2, err := getImageDetails(image2)
if err != nil {
return fmt.Errorf("Couldn't get details from image reference for (%s): %s", image2, err)
t.Fatalf("Couldn't get details from image reference for (%s): %s", image2, err)
}
actualOffset := int(math.Abs(float64(img1.numLayers - img2.numLayers)))
if actualOffset != offset {
return fmt.Errorf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
t.Fatalf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
}
return nil
}
func getImageDetails(image string) (*imageDetails, error) {

View File

@ -18,8 +18,9 @@ package buildcontext
import (
"errors"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
)
// BuildContext unifies calls to download and unpack the build context.

View File

@ -17,14 +17,15 @@ limitations under the License.
package buildcontext
import (
"os"
"path/filepath"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"os"
"path/filepath"
)
// S3 unifies calls to download and unpack the build context.

View File

@ -27,57 +27,57 @@ import (
var userTests = []struct {
user string
expectedUid string
expectedUID string
shouldError bool
}{
{
user: "root",
expectedUid: "root",
expectedUID: "root",
shouldError: false,
},
{
user: "0",
expectedUid: "0",
expectedUID: "0",
shouldError: false,
},
{
user: "fakeUser",
expectedUid: "",
expectedUID: "",
shouldError: true,
},
{
user: "root:root",
expectedUid: "root:root",
expectedUID: "root:root",
shouldError: false,
},
{
user: "0:root",
expectedUid: "0:root",
expectedUID: "0:root",
shouldError: false,
},
{
user: "root:0",
expectedUid: "root:0",
expectedUID: "root:0",
shouldError: false,
},
{
user: "0:0",
expectedUid: "0:0",
expectedUID: "0:0",
shouldError: false,
},
{
user: "root:fakeGroup",
expectedUid: "",
expectedUID: "",
shouldError: true,
},
{
user: "$envuser",
expectedUid: "root",
expectedUID: "root",
shouldError: false,
},
{
user: "root:$envgroup",
expectedUid: "root:root",
expectedUID: "root:root",
shouldError: false,
},
}
@ -97,6 +97,6 @@ func TestUpdateUser(t *testing.T) {
}
buildArgs := dockerfile.NewBuildArgs([]string{})
err := cmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUid, cfg.User)
testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUID, cfg.User)
}
}

View File

@ -19,16 +19,16 @@ package config
// KanikoOptions are options that are set by command line arguments
type KanikoOptions struct {
DockerfilePath string
Destinations multiArg
SrcContext string
SnapshotMode string
Bucket string
InsecurePush bool
SkipTlsVerify bool
BuildArgs multiArg
TarPath string
Target string
Destinations multiArg
BuildArgs multiArg
InsecurePush bool
SkipTLSVerify bool
SingleSnapshot bool
Reproducible bool
Target string
NoPush bool
}

View File

@ -21,8 +21,8 @@ import "github.com/moby/buildkit/frontend/dockerfile/instructions"
// KanikoStage wraps a stage of the Dockerfile and provides extra information
type KanikoStage struct {
instructions.Stage
BaseImageIndex int
FinalStage bool
BaseImageStoredLocally bool
BaseImageIndex int
SaveStage bool
}

View File

@ -17,8 +17,9 @@ limitations under the License.
package dockerfile
import (
d "github.com/docker/docker/builder/dockerfile"
"strings"
d "github.com/docker/docker/builder/dockerfile"
)
type BuildArgs struct {
@ -36,14 +37,14 @@ func NewBuildArgs(args []string) *BuildArgs {
}
}
return &BuildArgs{
*d.NewBuildArgs(argsFromOptions),
BuildArgs: *d.NewBuildArgs(argsFromOptions),
}
}
func (b *BuildArgs) Clone() *BuildArgs {
clone := b.BuildArgs.Clone()
return &BuildArgs{
*clone,
BuildArgs: *clone,
}
}

View File

@ -87,7 +87,7 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error {
// Create a transport to set our user-agent.
tr := http.DefaultTransport
if opts.SkipTlsVerify {
if opts.SkipTLSVerify {
tr.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}

View File

@ -17,8 +17,9 @@ limitations under the License.
package util
import (
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
)
func GetBucketAndItem(context string) (string, string) {

View File

@ -23,7 +23,7 @@ import (
"github.com/GoogleContainerTools/kaniko/testutil"
)
var testUrl = "https://github.com/GoogleContainerTools/runtimes-common/blob/master/LICENSE"
var testURL = "https://github.com/GoogleContainerTools/runtimes-common/blob/master/LICENSE"
var testEnvReplacement = []struct {
path string
@ -220,7 +220,7 @@ var matchSourcesTests = []struct {
srcs: []string{
"pkg/*",
"/root/dir?",
testUrl,
testURL,
},
files: []string{
"pkg/a",
@ -234,7 +234,7 @@ var matchSourcesTests = []struct {
"/root/dir1",
"pkg/a",
"pkg/b",
testUrl,
testURL,
},
},
}
@ -344,11 +344,11 @@ var isSrcValidTests = []struct {
},
{
srcsAndDest: []string{
testUrl,
testURL,
"dest",
},
resolvedSources: []string{
testUrl,
testURL,
},
shouldErr: false,
},
@ -369,13 +369,13 @@ var testResolveSources = []struct {
srcsAndDest: []string{
"context/foo",
"context/b*",
testUrl,
testURL,
"dest/",
},
expectedList: []string{
"context/foo",
"context/bar",
testUrl,
testURL,
},
},
}

View File

@ -343,8 +343,8 @@ func filesAreHardlinks(first, second string) checker {
if err != nil {
t.Fatalf("error getting file %s", second)
}
stat1 := getSyscallStat_t(fi1)
stat2 := getSyscallStat_t(fi2)
stat1 := getSyscallStatT(fi1)
stat2 := getSyscallStatT(fi2)
if stat1.Ino != stat2.Ino {
t.Errorf("%s and %s aren't hardlinks as they dont' have the same inode", first, second)
}

View File

@ -25,6 +25,7 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/sirupsen/logrus"
@ -61,7 +62,7 @@ func RetrieveSourceImage(stage config.KanikoStage, buildArgs []string) (v1.Image
}
// RetrieveConfigFile returns the config file for an image
func RetrieveConfigFile(sourceImage v1.Image) (*v1.ConfigFile, error) {
func RetrieveConfigFile(sourceImage partial.WithConfigFile) (*v1.ConfigFile, error) {
imageConfig, err := sourceImage.ConfigFile()
if err != nil {
return nil, err

View File

@ -118,7 +118,7 @@ func (t *Tar) Whiteout(p string) error {
func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) {
hardlink := false
linkDst := ""
stat := getSyscallStat_t(i)
stat := getSyscallStatT(i)
if stat != nil {
nlinks := stat.Nlink
if nlinks > 1 {
@ -135,7 +135,7 @@ func (t *Tar) checkHardlink(p string, i os.FileInfo) (bool, string) {
return hardlink, linkDst
}
func getSyscallStat_t(i os.FileInfo) *syscall.Stat_t {
func getSyscallStatT(i os.FileInfo) *syscall.Stat_t {
if sys := i.Sys(); sys != nil {
if stat, ok := sys.(*syscall.Stat_t); ok {
return stat

View File

@ -31,7 +31,7 @@ echo "Running validation scripts..."
scripts=(
"hack/boilerplate.sh"
"hack/gofmt.sh"
"hack/gometalinter.sh"
"hack/linter.sh"
"hack/dep.sh"
)
fail=0