refactor: simpler local integration tests (#2110)

* tests(integration): add fake gcs server and use sdk instead of gsutil

* tests(integration): add flag to run tests for a subset of dockerfiles

Signed-off-by: Höhl, Lukas <lukas.hoehl@accso.de>

* tests(integration): conditionally create gcs client

* refactor: create package for gcs bucket interaction

* tests(integration): use util.Tar for integration tarball creation

* refactor: create TarballOfDirectory func

* chore: add dockerignore for faster builds

* docs: add docs for dockerfile pattern

* fix: issue during personal review

* chore: cleanup

Signed-off-by: Höhl, Lukas <lukas.hoehl@accso.de>

* fix(integration-tests): remove default bucket

Signed-off-by: Lukas Hoehl <ludi.origin@gmail.com>
This commit is contained in:
Lukas 2022-06-14 19:38:01 +02:00 committed by GitHub
parent 9f57952214
commit 679c71c907
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 377 additions and 178 deletions

9
.dockerignore Normal file
View File

@ -0,0 +1,9 @@
integration/**/*
scripts/**/*
hack/**/*
examples/**/*
docs/**/*
.github/**/*
logo/**/*
out/**/*

View File

@ -86,6 +86,8 @@ find . -name "*.go" | grep -v vendor/ | xargs gofmt -l -s -w
Currently the integration tests that live in [`integration`](./integration) can be run against your own gcloud space or a local registry.
These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions.
In either case, you will need the following tools:
* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation)
@ -134,33 +136,25 @@ go test ./integration -v --bucket $GCS_BUCKET --repo $IMAGE_REPO -run TestLayers
These tests will be kicked off by [reviewers](#reviews) for submitted PRs by the kokoro task.
#### Local repository
#### Local integration tests
To run integration tests locally against a local registry, install a local docker registry
To run integration tests locally against a local registry and gcs bucket, set the LOCAL environment variable
```shell
docker run --rm -d -p 5000:5000 --name registry registry:2
LOCAL=1 make integration-test
```
Then export the `IMAGE_REPO` variable with the `localhost:5000` value
#### Running integration tests for a specific dockerfile
In order to test only specific dockerfiles during local integration testing, you can specify a pattern to match against inside the integration/dockerfiles directory.
```shell
export IMAGE_REPO=localhost:5000
DOCKERFILE_PATTERN="Dockerfile_test_add*" make integration-test-run
```
And run the integration tests
This will only run dockerfiles that match the pattern `Dockerfile_test_add*`
```shell
make integration-test
```
You can also run tests with `go test`, for example to run tests individually:
```shell
go test ./integration -v --repo localhost:5000 -run TestLayers/test_layer_Dockerfile_test_copy_bucket
```
These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions.
### Benchmarking

2
go.mod
View File

@ -143,7 +143,7 @@ require (
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/api v0.74.0 // indirect
google.golang.org/api v0.74.0
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf // indirect
google.golang.org/grpc v1.45.0 // indirect

View File

@ -60,7 +60,7 @@ func TestSnapshotBenchmark(t *testing.T) {
buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)}
var benchmarkDir string
benchmarkDir, *err = buildKanikoImage(t.Logf, "", dockerfile,
buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket,
buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.gcsClient,
config.serviceAccount, false)
if *err != nil {
return

View File

@ -16,7 +16,11 @@ limitations under the License.
package integration
import "strings"
import (
"strings"
"cloud.google.com/go/storage"
)
type integrationTestConfig struct {
gcsBucket string
@ -25,6 +29,8 @@ type integrationTestConfig struct {
hardlinkBaseImage string
serviceAccount string
dockerMajorVersion int
gcsClient *storage.Client
dockerfilesPattern string
}
const gcrRepoPrefix string = "gcr.io/"

View File

@ -1,75 +0,0 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"time"
)
// CreateIntegrationTarball will take the contents of the integration directory and write
// them to a gzipped tarball in a temporary dir. It will return a path to the tarball.
// It shells out to the system `tar` binary, which must be on PATH.
func CreateIntegrationTarball() (string, error) {
	log.Println("Creating tarball of integration test files to use as build context")
	dir, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("Failed find path to integration dir: %w", err)
	}
	tempDir, err := ioutil.TempDir("", "")
	if err != nil {
		return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err)
	}
	// Timestamped filename avoids collisions between concurrent test runs.
	contextFile := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano())
	cmd := exec.Command("tar", "-C", dir, "-zcvf", contextFile, ".")
	_, err = RunCommandWithoutTest(cmd)
	if err != nil {
		return "", fmt.Errorf("Failed to create build context tarball from integration dir: %w", err)
	}
	return contextFile, err
}
// UploadFileToBucket will upload the file at filePath to gcsBucket under the object
// name gcsPath. It will return the full destination path of the file in gcsBucket.
// It shells out to `gsutil`, which must be on PATH and authenticated for the bucket.
func UploadFileToBucket(gcsBucket string, filePath string, gcsPath string) (string, error) {
	dst := fmt.Sprintf("%s/%s", gcsBucket, gcsPath)
	log.Printf("Uploading file at %s to GCS bucket at %s\n", filePath, dst)
	cmd := exec.Command("gsutil", "cp", filePath, dst)
	out, err := RunCommandWithoutTest(cmd)
	if err != nil {
		// Log gsutil's output to help diagnose auth/permission failures.
		log.Printf("Error uploading file %s to GCS at %s: %s", filePath, dst, err)
		log.Println(string(out))
		return "", fmt.Errorf("Failed to copy tarball to GCS bucket %s: %w", gcsBucket, err)
	}
	return dst, nil
}
// DeleteFromBucket will remove the content at path. path should be the full path
// to a file in GCS (e.g. gs://bucket/object). It shells out to `gsutil`, which
// must be on PATH and authenticated for the bucket.
func DeleteFromBucket(path string) error {
	cmd := exec.Command("gsutil", "rm", path)
	_, err := RunCommandWithoutTest(cmd)
	if err != nil {
		return fmt.Errorf("Failed to delete file %s from GCS: %w", path, err)
	}
	return err
}

View File

@ -18,6 +18,7 @@ package integration
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
@ -30,8 +31,10 @@ import (
"testing"
"time"
"cloud.google.com/go/storage"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
)
const (
@ -157,13 +160,16 @@ func GetVersionedKanikoImage(imageRepo, dockerfile string, version int) string {
return strings.ToLower(imageRepo + kanikoPrefix + dockerfile + strconv.Itoa(version))
}
// FindDockerFiles will look for test docker files in the directory dockerfilesPath.
// These files must start with `Dockerfile_test`. If the file is one we are intentionally
// FindDockerFiles will look for test docker files in the directory dir
// and match the files against dockerfilesPattern.
// If the file is one we are intentionally
// skipping, it will not be included in the returned list.
func FindDockerFiles(dockerfilesPath string) ([]string, error) {
allDockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile_test*"))
func FindDockerFiles(dir, dockerfilesPattern string) ([]string, error) {
pattern := filepath.Join(dir, dockerfilesPattern)
fmt.Printf("finding docker images with pattern %v\n", pattern)
allDockerfiles, err := filepath.Glob(pattern)
if err != nil {
return []string{}, fmt.Errorf("Failed to find docker files at %s: %w", dockerfilesPath, err)
return []string{}, fmt.Errorf("Failed to find docker files with pattern %s: %w", dockerfilesPattern, err)
}
var dockerfiles []string
@ -285,7 +291,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat
if _, present := d.filesBuilt[dockerfile]; present {
return nil
}
gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo
gcsBucket, gcsClient, serviceAccount, imageRepo := config.gcsBucket, config.gcsClient, config.serviceAccount, config.imageRepo
var buildArgs []string
buildArgFlag := "--build-arg"
@ -318,7 +324,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
timer = timing.Start(dockerfile + "_kaniko")
if _, err := buildKanikoImage(t.Logf, dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage,
contextDir, gcsBucket, serviceAccount, true); err != nil {
contextDir, gcsBucket, gcsClient, serviceAccount, true); err != nil {
return err
}
timing.DefaultRun.Stop(timer)
@ -443,6 +449,7 @@ func buildKanikoImage(
kanikoImage string,
contextDir string,
gcsBucket string,
gcsClient *storage.Client,
serviceAccount string,
shdUpload bool,
) (string, error) {
@ -457,7 +464,11 @@ func buildKanikoImage(
benchmarkFile := path.Join(benchmarkDir, dockerfile)
fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile)
dst := path.Join("benchmarks", fileName)
defer UploadFileToBucket(gcsBucket, benchmarkFile, dst)
file, err := os.Open(benchmarkFile)
if err != nil {
return "", err
}
defer bucket.Upload(context.Background(), gcsBucket, dst, file, gcsClient)
}
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package integration
import (
"context"
"encoding/json"
"flag"
"fmt"
@ -33,9 +34,11 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/pkg/errors"
"google.golang.org/api/option"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/GoogleContainerTools/kaniko/testutil"
)
@ -86,22 +89,33 @@ func getDockerMajorVersion() int {
func launchTests(m *testing.M) (int, error) {
if config.isGcrRepository() {
contextFile, err := CreateIntegrationTarball()
contextFilePath, err := CreateIntegrationTarball()
if err != nil {
return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context")
}
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
bucketName, item, err := bucket.GetNameAndFilepathFromURI(config.gcsBucket)
if err != nil {
return 1, errors.Wrap(err, "failed to get bucket name from uri")
}
contextFile, err := os.Open(contextFilePath)
if err != nil {
return 1, fmt.Errorf("failed to read file at path %v: %w", contextFilePath, err)
}
err = bucket.Upload(context.Background(), bucketName, item, contextFile, config.gcsClient)
if err != nil {
return 1, errors.Wrap(err, "Failed to upload build context")
}
if err = os.Remove(contextFile); err != nil {
return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile))
if err = os.Remove(contextFilePath); err != nil {
return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFilePath))
}
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
defer DeleteFromBucket(fileInBucket)
deleteFunc := func() {
bucket.Delete(context.Background(), bucketName, item, config.gcsClient)
}
RunOnInterrupt(deleteFunc)
defer deleteFunc()
}
if err := buildRequiredImages(); err != nil {
return 1, errors.Wrap(err, "Error while building images")
@ -119,18 +133,18 @@ func TestMain(m *testing.M) {
os.Exit(1)
}
if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil {
config = initIntegrationTestConfig()
if allDockerfiles, err = FindDockerFiles(dockerfilesPath, config.dockerfilesPattern); err != nil {
fmt.Println("Coudn't create map of dockerfiles", err)
os.Exit(1)
} else {
config = initIntegrationTestConfig()
exitCode, err := launchTests(m)
if err != nil {
fmt.Println(err)
}
os.Exit(exitCode)
}
exitCode, err := launchTests(m)
if err != nil {
fmt.Println(err)
}
os.Exit(exitCode)
}
func buildRequiredImages() error {
@ -859,9 +873,16 @@ func (i imageDetails) String() string {
func initIntegrationTestConfig() *integrationTestConfig {
var c integrationTestConfig
var gcsEndpoint string
var disableGcsAuth bool
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.")
flag.StringVar(&gcsEndpoint, "gcs-endpoint", "", "Custom endpoint for GCS. Used for local integration tests")
flag.BoolVar(&disableGcsAuth, "disable-gcs-auth", false, "Disable GCS Authentication. Used for local integration tests")
// adds the possibility to run a single dockerfile. This is useful since running all images can exhaust the dockerhub pull limit
flag.StringVar(&c.dockerfilesPattern, "dockerfiles-pattern", "Dockerfile_test*", "The pattern to match dockerfiles with")
flag.Parse()
if len(c.serviceAccount) > 0 {
@ -886,6 +907,23 @@ func initIntegrationTestConfig() *integrationTestConfig {
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
if c.gcsBucket != "" {
var opts []option.ClientOption
if gcsEndpoint != "" {
opts = append(opts, option.WithEndpoint(gcsEndpoint))
}
if disableGcsAuth {
opts = append(opts, option.WithoutAuthentication())
}
gcsClient, err := bucket.NewClient(context.Background(), opts...)
if err != nil {
log.Fatalf("Could not create a new Google Storage Client: %s", err)
}
c.gcsClient = gcsClient
}
c.dockerMajorVersion = getDockerMajorVersion()
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
@ -893,7 +931,7 @@ func initIntegrationTestConfig() *integrationTestConfig {
}
func meetsRequirements() bool {
requiredTools := []string{"container-diff", "gsutil"}
requiredTools := []string{"container-diff"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)

58
integration/tar.go Normal file
View File

@ -0,0 +1,58 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"compress/gzip"
"fmt"
"io/ioutil"
"log"
"os"
"time"
"github.com/GoogleContainerTools/kaniko/pkg/util"
)
// CreateIntegrationTarball will take the contents of the integration directory and write
// them to a gzipped tarball in a temporary dir. It will return the path to the tarball.
func CreateIntegrationTarball() (string, error) {
	log.Println("Creating tarball of integration test files to use as build context")
	dir, err := os.Getwd()
	if err != nil {
		// BUG FIX: previously returned the literal string "nil" here, which a
		// caller ignoring err would treat as a valid path.
		return "", fmt.Errorf("Failed find path to integration dir: %w", err)
	}
	tempDir, err := ioutil.TempDir("", "")
	if err != nil {
		return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err)
	}
	// Timestamped filename avoids collisions between concurrent test runs.
	contextFilePath := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano())
	file, err := os.OpenFile(contextFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", err
	}
	// BUG FIX: the file handle was never closed before.
	defer file.Close()
	// BUG FIX: the tar stream must be written THROUGH the gzip writer.
	// Previously the tarball was written directly to `file` while the gzip
	// writer was closed empty afterwards, producing a corrupt .tar.gz.
	gzipWriter := gzip.NewWriter(file)
	if err := util.CreateTarballOfDirectory(dir, gzipWriter); err != nil {
		gzipWriter.Close()
		return "", fmt.Errorf("creating tarball of integration dir: %w", err)
	}
	// Close explicitly (not deferred) so a flush failure surfaces as an error.
	if err := gzipWriter.Close(); err != nil {
		return "", fmt.Errorf("flushing gzip stream: %w", err)
	}
	return contextFilePath, nil
}

View File

@ -17,15 +17,15 @@ limitations under the License.
package buildcontext
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"cloud.google.com/go/storage"
kConfig "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
@ -36,27 +36,24 @@ type GCS struct {
}
func (g *GCS) UnpackTarFromBuildContext() (string, error) {
bucket, item := util.GetBucketAndItem(g.context)
return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucket, item, kConfig.BuildContextDir)
bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(g.context)
if err != nil {
return "", fmt.Errorf("getting bucketname and filepath from context: %w", err)
}
return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucketName, filepath, kConfig.BuildContextDir)
}
func UploadToBucket(r io.Reader, dest string) error {
ctx := context.Background()
context := strings.SplitAfter(dest, "://")[1]
bucketName, item := util.GetBucketAndItem(context)
client, err := storage.NewClient(ctx)
bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(dest)
if err != nil {
return fmt.Errorf("getting bucketname and filepath from dest: %w", err)
}
client, err := bucket.NewClient(ctx)
if err != nil {
return err
}
bucket := client.Bucket(bucketName)
w := bucket.Object(item).NewWriter(ctx)
if _, err := io.Copy(w, r); err != nil {
return err
}
if err := w.Close(); err != nil {
return err
}
return nil
return bucket.Upload(ctx, bucketName, filepath, r, client)
}
// unpackTarFromGCSBucket unpacks the context.tar.gz file in the given bucket to the given directory
@ -77,15 +74,14 @@ func unpackTarFromGCSBucket(bucketName, item, directory string) error {
// getTarFromBucket gets context.tar.gz from the GCS bucket and saves it to the filesystem
// It returns the path to the tar file
func getTarFromBucket(bucketName, item, directory string) (string, error) {
func getTarFromBucket(bucketName, filepathInBucket, directory string) (string, error) {
ctx := context.Background()
client, err := storage.NewClient(ctx)
client, err := bucket.NewClient(ctx)
if err != nil {
return "", err
}
bucket := client.Bucket(bucketName)
// Get the tarfile context.tar.gz from the GCS bucket, and save it to a tar object
reader, err := bucket.Object(item).NewReader(ctx)
reader, err := bucket.ReadCloser(ctx, bucketName, filepathInBucket, client)
if err != nil {
return "", err
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package buildcontext
import (
"fmt"
"os"
"path/filepath"
"strings"
@ -24,6 +25,7 @@ import (
kConfig "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
@ -37,7 +39,11 @@ type S3 struct {
// UnpackTarFromBuildContext download and untar a file from s3
func (s *S3) UnpackTarFromBuildContext() (string, error) {
bucket, item := util.GetBucketAndItem(s.context)
bucket, item, err := bucket.GetNameAndFilepathFromURI(s.context)
if err != nil {
return "", fmt.Errorf("getting bucketname and filepath from context: %w", err)
}
option := session.Options{
SharedConfigState: session.SharedConfigEnable,
}

View File

@ -0,0 +1,88 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bucket
import (
"context"
"fmt"
"io"
"net/url"
"strings"
"cloud.google.com/go/storage"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"google.golang.org/api/option"
)
// Upload streams the full contents of r into the object at path inside the
// named GCS bucket, creating or overwriting it. The upload is only committed
// once the object writer closes successfully.
func Upload(ctx context.Context, bucketName string, path string, r io.Reader, client *storage.Client) error {
	writer := client.Bucket(bucketName).Object(path).NewWriter(ctx)
	if _, err := io.Copy(writer, r); err != nil {
		return err
	}
	return writer.Close()
}
// Delete removes the object at path from the named GCS bucket. path should be
// the full object path within the bucket.
func Delete(ctx context.Context, bucketName string, path string, client *storage.Client) error {
	if err := client.Bucket(bucketName).Object(path).Delete(ctx); err != nil {
		return fmt.Errorf("failed to delete file at %s in gcs bucket %v: %w", path, bucketName, err)
	}
	return nil
}
// ReadCloser opens the object at path in the named GCS bucket for reading.
// The caller is responsible for closing the returned io.ReadCloser.
func ReadCloser(ctx context.Context, bucketName string, path string, client *storage.Client) (io.ReadCloser, error) {
	reader, err := client.Bucket(bucketName).Object(path).NewReader(ctx)
	if err != nil {
		return nil, err
	}
	return reader, nil
}
// NewClient returns a new Google Cloud Storage client configured with the
// given client options (e.g. a custom endpoint or disabled authentication
// for local integration tests against a fake GCS server).
func NewClient(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error) {
	client, err := storage.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	// Return an explicit nil error rather than the (necessarily nil) err
	// variable, so success and failure paths are unambiguous.
	return client, nil
}
// GetNameAndFilepathFromURI returns the bucketname and the path to the item inside.
// Will error if provided URI is not a valid URL.
// If the filepath is empty, returns the contextTar filename
func GetNameAndFilepathFromURI(bucketURI string) (bucketName string, path string, err error) {
	// Named `parsed` to avoid shadowing the net/url package (the previous
	// local variable was called `url`).
	parsed, err := url.Parse(bucketURI)
	if err != nil {
		return "", "", err
	}
	// Strip the leading slash so the object path is relative to the bucket root.
	path = strings.TrimPrefix(parsed.Path, "/")
	if path == "" {
		// A bare bucket URI means "use the conventional context tarball name".
		path = constants.ContextTar
	}
	return parsed.Host, path, nil
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package bucket
import (
"testing"
@ -29,28 +29,29 @@ func Test_GetBucketAndItem(t *testing.T) {
context string
expectedBucket string
expectedItem string
expectedErr bool
}{
{
name: "three slashes",
context: "test1/test2/test3",
context: "gs://test1/test2/test3",
expectedBucket: "test1",
expectedItem: "test2/test3",
},
{
name: "two slashes",
context: "test1/test2",
context: "gs://test1/test2",
expectedBucket: "test1",
expectedItem: "test2",
},
{
name: "one slash",
context: "test1/",
context: "gs://test1/",
expectedBucket: "test1",
expectedItem: constants.ContextTar,
},
{
name: "zero slash",
context: "test1",
context: "gs://test1",
expectedBucket: "test1",
expectedItem: constants.ContextTar,
},
@ -58,7 +59,8 @@ func Test_GetBucketAndItem(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
gotBucket, gotItem := GetBucketAndItem(test.context)
gotBucket, gotItem, err := GetNameAndFilepathFromURI(test.context)
testutil.CheckError(t, test.expectedErr, err)
testutil.CheckDeepEqual(t, test.expectedBucket, gotBucket)
testutil.CheckDeepEqual(t, test.expectedItem, gotItem)
})

View File

@ -1,31 +0,0 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
)
// GetBucketAndItem splits a GCS context string of the form "bucket/path/to/item"
// into its bucket name and item path. If no item path follows the bucket name,
// the default context tarball name (constants.ContextTar) is returned as the item.
func GetBucketAndItem(context string) (string, string) {
	split := strings.SplitN(context, "/", 2)
	if len(split) == 2 && split[1] != "" {
		return split[0], split[1]
	}
	return split[0], constants.ContextTar
}

View File

@ -22,6 +22,7 @@ import (
"compress/gzip"
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
@ -50,6 +51,26 @@ func NewTar(f io.Writer) Tar {
}
}
// CreateTarballOfDirectory walks pathToDir recursively and writes every entry
// it encounters to f as a tar stream. pathToDir must be absolute so that each
// walked entry path is absolute as well.
func CreateTarballOfDirectory(pathToDir string, f io.Writer) error {
	if !filepath.IsAbs(pathToDir) {
		return errors.New("pathToDir is not absolute")
	}
	tw := NewTar(f)
	defer tw.Close()
	return filepath.WalkDir(pathToDir, func(entryPath string, _ fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !filepath.IsAbs(entryPath) {
			return fmt.Errorf("path %v is not absolute, cant read file", entryPath)
		}
		return tw.AddFileToTar(entryPath)
	})
}
// Close will close any open streams used by Tar.
func (t *Tar) Close() {
t.w.Close()

View File

@ -20,6 +20,7 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
@ -129,3 +130,46 @@ func createTar(testdir string, writer io.Writer) error {
}
return nil
}
// Test_CreateTarballOfDirectory round-trips a directory through
// CreateTarballOfDirectory and UnTar, verifying every extracted file's content.
func Test_CreateTarballOfDirectory(t *testing.T) {
	tmpDir := t.TempDir()
	wantErr := false
	createFilesInTempDir(t, tmpDir)
	f := &bytes.Buffer{}
	err := CreateTarballOfDirectory(tmpDir, f)
	testutil.CheckError(t, wantErr, err)
	extractedFilesDir := filepath.Join(tmpDir, "extracted")
	if err := os.Mkdir(extractedFilesDir, 0755); err != nil {
		t.Error(err)
		return
	}
	files, err := UnTar(f, extractedFilesDir)
	testutil.CheckError(t, wantErr, err)
	for _, filePath := range files {
		fileInfo, err := os.Lstat(filePath)
		testutil.CheckError(t, wantErr, err)
		if fileInfo.IsDir() {
			// skip directory
			continue
		}
		// BUG FIX: the previous version opened each file with os.Open and
		// never closed it, leaking one descriptor per extracted file.
		// os.ReadFile reads and closes in one call.
		body, err := os.ReadFile(filePath)
		testutil.CheckError(t, wantErr, err)
		// Files are named after their index and contain "hello from <index>\n".
		index := filepath.Base(filePath)
		testutil.CheckDeepEqual(t, string(body), fmt.Sprintf("hello from %s\n", index))
	}
}
func createFilesInTempDir(t *testing.T, tmpDir string) {
for i := 0; i < 2; i++ {
fName := filepath.Join(tmpDir, fmt.Sprint(i))
content := fmt.Sprintf("hello from %d\n", i)
if err := os.WriteFile(fName, []byte(content), 0666); err != nil {
t.Error(err)
return
}
}
}

View File

@ -13,9 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
set -e
# Reuse an existing stopped "registry" container if present; otherwise launch a
# fresh local Docker registry (registry:2) listening on port 5000.
function start_local_registry {
docker start registry || docker run --name registry -d -p 5000:5000 registry:2
}
# TODO: to get this working, we need a way to override the gcs endpoint of kaniko at runtime
# If this is done, integration test main includes flags --gcs-endpoint and --disable-gcs-auth
# to mock the gcs endpoints and upload files to the fake-gcs-server
# Reuse an existing stopped "fake-gcs-server" container if present; otherwise
# launch fsouza/fake-gcs-server on port 4443 (plain HTTP) to mock GCS locally.
function start_fake_gcs_server {
docker start fake-gcs-server || docker run -d -p 4443:4443 --name fake-gcs-server fsouza/fake-gcs-server -scheme http
}
GCS_BUCKET="${GCS_BUCKET:-gs://kaniko-test-bucket}"
IMAGE_REPO="${IMAGE_REPO:-gcr.io/kaniko-test}"
docker version
@ -23,4 +33,26 @@ docker version
echo "Running integration tests..."
make out/executor
make out/warmer
go test ./integration/... --bucket "${GCS_BUCKET}" --repo "${IMAGE_REPO}" --timeout 50m "$@"
FLAGS=(
"--timeout=50m"
)
if [[ -n $DOCKERFILE_PATTERN ]]; then
FLAGS+=("--dockerfiles-pattern=$DOCKERFILE_PATTERN")
fi
if [[ -n $LOCAL ]]; then
echo "running in local mode, mocking registry and gcs bucket..."
start_local_registry
IMAGE_REPO="localhost:5000/kaniko-test"
GCS_BUCKET=""
fi
FLAGS+=(
"--bucket=${GCS_BUCKET}"
"--repo=${IMAGE_REPO}"
)
go test ./integration/... "${FLAGS[@]}" "$@"