refactor: simpler local integration tests (#2110)

* tests(integration): add fake gcs server and use sdk instead of gsutil

* tests(integration): add flag to run tests for a subset of dockerfiles

Signed-off-by: Höhl, Lukas <lukas.hoehl@accso.de>

* tests(integration): conditionally create gcs client

* refactor: create package for gcs bucket interaction

* tests(integration): use util.Tar for integration tarball creation

* refactor: create TarballOfDirectory func

* chore: add dockerignore for faster builds

* docs: add docs for dockerfile pattern

* fix: issue during personal review

* chore: cleanup

Signed-off-by: Höhl, Lukas <lukas.hoehl@accso.de>

* fix(integration-tests): remove default bucket

Signed-off-by: Lukas Hoehl <ludi.origin@gmail.com>
Authored by Lukas on 2022-06-14 19:38:01 +02:00, committed by GitHub
parent 9f57952214
commit 679c71c907
17 changed files with 377 additions and 178 deletions

.dockerignore (new file)

@@ -0,0 +1,9 @@
integration/**/*
scripts/**/*
hack/**/*
examples/**/*
docs/**/*
.github/**/*
logo/**/*
out/**/*


@@ -86,6 +86,8 @@ find . -name "*.go" | grep -v vendor/ | xargs gofmt -l -s -w
Currently the integration tests that live in [`integration`](./integration) can be run against your own gcloud space or a local registry.
+These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions.
In either case, you will need the following tools:
* [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation)
@@ -134,33 +136,25 @@ go test ./integration -v --bucket $GCS_BUCKET --repo $IMAGE_REPO -run TestLayers
These tests will be kicked off by [reviewers](#reviews) for submitted PRs by the kokoro task.
-#### Local repository
+#### Local integration tests
-To run integration tests locally against a local registry, install a local docker registry
+To run integration tests locally against a local registry and gcs bucket, set the LOCAL environment variable
```shell
-docker run --rm -d -p 5000:5000 --name registry registry:2
+LOCAL=1 make integration-test
```
-Then export the `IMAGE_REPO` variable with the `localhost:5000`value
+#### Running integration tests for a specific dockerfile
+In order to test only specific dockerfiles during local integration testing, you can specify a pattern to match against inside the integration/dockerfiles directory.
```shell
-export IMAGE_REPO=localhost:5000
+DOCKERFILE_PATTERN="Dockerfile_test_add*" make integration-test-run
```
-And run the integration tests
+This will only run dockerfiles that match the pattern `Dockerfile_test_add*`
-```shell
-make integration-test
-```
-You can also run tests with `go test`, for example to run tests individually:
-```shell
-go test ./integration -v --repo localhost:5000 -run TestLayers/test_layer_Dockerfile_test_copy_bucket
-```
-These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions.
### Benchmarking

go.mod

@@ -143,7 +143,7 @@ require (
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
-google.golang.org/api v0.74.0 // indirect
+google.golang.org/api v0.74.0
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf // indirect
google.golang.org/grpc v1.45.0 // indirect


@@ -60,7 +60,7 @@ func TestSnapshotBenchmark(t *testing.T) {
buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)}
var benchmarkDir string
benchmarkDir, *err = buildKanikoImage(t.Logf, "", dockerfile,
-buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket,
+buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.gcsClient,
config.serviceAccount, false)
if *err != nil {
return


@@ -16,7 +16,11 @@ limitations under the License.
package integration
-import "strings"
+import (
+"strings"
+"cloud.google.com/go/storage"
+)
type integrationTestConfig struct {
gcsBucket string
@@ -25,6 +29,8 @@ type integrationTestConfig struct {
hardlinkBaseImage string
serviceAccount string
dockerMajorVersion int
+gcsClient *storage.Client
+dockerfilesPattern string
}
const gcrRepoPrefix string = "gcr.io/"


@@ -1,75 +0,0 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"time"
)
// CreateIntegrationTarball will take the contents of the integration directory and write
// them to a tarball in a temmporary dir. It will return a path to the tarball.
func CreateIntegrationTarball() (string, error) {
log.Println("Creating tarball of integration test files to use as build context")
dir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("Failed find path to integration dir: %w", err)
}
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err)
}
contextFile := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano())
cmd := exec.Command("tar", "-C", dir, "-zcvf", contextFile, ".")
_, err = RunCommandWithoutTest(cmd)
if err != nil {
return "", fmt.Errorf("Failed to create build context tarball from integration dir: %w", err)
}
return contextFile, err
}
// UploadFileToBucket will upload the at filePath to gcsBucket. It will return the path
// of the file in gcsBucket.
func UploadFileToBucket(gcsBucket string, filePath string, gcsPath string) (string, error) {
dst := fmt.Sprintf("%s/%s", gcsBucket, gcsPath)
log.Printf("Uploading file at %s to GCS bucket at %s\n", filePath, dst)
cmd := exec.Command("gsutil", "cp", filePath, dst)
out, err := RunCommandWithoutTest(cmd)
if err != nil {
log.Printf("Error uploading file %s to GCS at %s: %s", filePath, dst, err)
log.Println(string(out))
return "", fmt.Errorf("Failed to copy tarball to GCS bucket %s: %w", gcsBucket, err)
}
return dst, nil
}
// DeleteFromBucket will remove the content at path. path should be the full path
// to a file in GCS.
func DeleteFromBucket(path string) error {
cmd := exec.Command("gsutil", "rm", path)
_, err := RunCommandWithoutTest(cmd)
if err != nil {
return fmt.Errorf("Failed to delete file %s from GCS: %w", path, err)
}
return err
}


@@ -18,6 +18,7 @@ package integration
import (
"bytes"
+"context"
"fmt"
"io/ioutil"
"os"
@@ -30,8 +31,10 @@ import (
"testing"
"time"
+"cloud.google.com/go/storage"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
+"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
)
const (
@@ -157,13 +160,16 @@ func GetVersionedKanikoImage(imageRepo, dockerfile string, version int) string {
return strings.ToLower(imageRepo + kanikoPrefix + dockerfile + strconv.Itoa(version))
}
-// FindDockerFiles will look for test docker files in the directory dockerfilesPath.
-// These files must start with `Dockerfile_test`. If the file is one we are intentionally
+// FindDockerFiles will look for test docker files in the directory dir
+// and match the files against dockerfilesPattern.
+// If the file is one we are intentionally
// skipping, it will not be included in the returned list.
-func FindDockerFiles(dockerfilesPath string) ([]string, error) {
-allDockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile_test*"))
+func FindDockerFiles(dir, dockerfilesPattern string) ([]string, error) {
+pattern := filepath.Join(dir, dockerfilesPattern)
+fmt.Printf("finding docker images with pattern %v\n", pattern)
+allDockerfiles, err := filepath.Glob(pattern)
if err != nil {
-return []string{}, fmt.Errorf("Failed to find docker files at %s: %w", dockerfilesPath, err)
+return []string{}, fmt.Errorf("Failed to find docker files with pattern %s: %w", dockerfilesPattern, err)
}
var dockerfiles []string
@@ -285,7 +291,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat
if _, present := d.filesBuilt[dockerfile]; present {
return nil
}
-gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo
+gcsBucket, gcsClient, serviceAccount, imageRepo := config.gcsBucket, config.gcsClient, config.serviceAccount, config.imageRepo
var buildArgs []string
buildArgFlag := "--build-arg"
@@ -318,7 +324,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat
kanikoImage := GetKanikoImage(imageRepo, dockerfile)
timer = timing.Start(dockerfile + "_kaniko")
if _, err := buildKanikoImage(t.Logf, dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage,
-contextDir, gcsBucket, serviceAccount, true); err != nil {
+contextDir, gcsBucket, gcsClient, serviceAccount, true); err != nil {
return err
}
timing.DefaultRun.Stop(timer)
@@ -443,6 +449,7 @@ func buildKanikoImage(
kanikoImage string,
contextDir string,
gcsBucket string,
+gcsClient *storage.Client,
serviceAccount string,
shdUpload bool,
) (string, error) {
@@ -457,7 +464,11 @@
benchmarkFile := path.Join(benchmarkDir, dockerfile)
fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile)
dst := path.Join("benchmarks", fileName)
-defer UploadFileToBucket(gcsBucket, benchmarkFile, dst)
+file, err := os.Open(benchmarkFile)
+if err != nil {
+return "", err
+}
+defer bucket.Upload(context.Background(), gcsBucket, dst, file, gcsClient)
}
}


@@ -17,6 +17,7 @@ limitations under the License.
package integration
import (
+"context"
"encoding/json"
"flag"
"fmt"
@@ -33,9 +34,11 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/pkg/errors"
+"google.golang.org/api/option"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
+"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/GoogleContainerTools/kaniko/testutil"
)
@@ -86,22 +89,33 @@ func getDockerMajorVersion() int {
func launchTests(m *testing.M) (int, error) {
if config.isGcrRepository() {
-contextFile, err := CreateIntegrationTarball()
+contextFilePath, err := CreateIntegrationTarball()
if err != nil {
return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context")
}
-fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
+bucketName, item, err := bucket.GetNameAndFilepathFromURI(config.gcsBucket)
+if err != nil {
+return 1, errors.Wrap(err, "failed to get bucket name from uri")
+}
+contextFile, err := os.Open(contextFilePath)
+if err != nil {
+return 1, fmt.Errorf("failed to read file at path %v: %w", contextFilePath, err)
+}
+err = bucket.Upload(context.Background(), bucketName, item, contextFile, config.gcsClient)
if err != nil {
return 1, errors.Wrap(err, "Failed to upload build context")
}
-if err = os.Remove(contextFile); err != nil {
-return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile))
+if err = os.Remove(contextFilePath); err != nil {
+return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFilePath))
}
-RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
-defer DeleteFromBucket(fileInBucket)
+deleteFunc := func() {
+bucket.Delete(context.Background(), bucketName, item, config.gcsClient)
+}
+RunOnInterrupt(deleteFunc)
+defer deleteFunc()
}
if err := buildRequiredImages(); err != nil {
return 1, errors.Wrap(err, "Error while building images")
@@ -119,18 +133,18 @@ func TestMain(m *testing.M) {
os.Exit(1)
}
-if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil {
+config = initIntegrationTestConfig()
+if allDockerfiles, err = FindDockerFiles(dockerfilesPath, config.dockerfilesPattern); err != nil {
fmt.Println("Coudn't create map of dockerfiles", err)
os.Exit(1)
-} else {
-config = initIntegrationTestConfig()
-exitCode, err := launchTests(m)
-if err != nil {
-fmt.Println(err)
-}
-os.Exit(exitCode)
}
+exitCode, err := launchTests(m)
+if err != nil {
+fmt.Println(err)
+}
+os.Exit(exitCode)
}
func buildRequiredImages() error {
@@ -859,9 +873,16 @@ func (i imageDetails) String() string {
func initIntegrationTestConfig() *integrationTestConfig {
var c integrationTestConfig
+var gcsEndpoint string
+var disableGcsAuth bool
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.")
+flag.StringVar(&gcsEndpoint, "gcs-endpoint", "", "Custom endpoint for GCS. Used for local integration tests")
+flag.BoolVar(&disableGcsAuth, "disable-gcs-auth", false, "Disable GCS Authentication. Used for local integration tests")
+// adds the possibility to run a single dockerfile. This is useful since running all images can exhaust the dockerhub pull limit
+flag.StringVar(&c.dockerfilesPattern, "dockerfiles-pattern", "Dockerfile_test*", "The pattern to match dockerfiles with")
flag.Parse()
if len(c.serviceAccount) > 0 {
@@ -886,6 +907,23 @@ func initIntegrationTestConfig() *integrationTestConfig {
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
+if c.gcsBucket != "" {
+var opts []option.ClientOption
+if gcsEndpoint != "" {
+opts = append(opts, option.WithEndpoint(gcsEndpoint))
+}
+if disableGcsAuth {
+opts = append(opts, option.WithoutAuthentication())
+}
+gcsClient, err := bucket.NewClient(context.Background(), opts...)
+if err != nil {
+log.Fatalf("Could not create a new Google Storage Client: %s", err)
+}
+c.gcsClient = gcsClient
+}
c.dockerMajorVersion = getDockerMajorVersion()
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
@@ -893,7 +931,7 @@
}
func meetsRequirements() bool {
-requiredTools := []string{"container-diff", "gsutil"}
+requiredTools := []string{"container-diff"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)
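The new `--gcs-endpoint` and `--disable-gcs-auth` flags above feed straight into client options. Below is a minimal, hedged sketch of that wiring when pointing at a local fake GCS server; the endpoint URL and flag values are assumptions for illustration, not part of this commit.

```go
package main

import (
	"context"
	"log"

	"google.golang.org/api/option"

	"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
)

func main() {
	// Hypothetical values standing in for --gcs-endpoint and --disable-gcs-auth.
	gcsEndpoint := "http://localhost:4443/storage/v1/"
	disableGcsAuth := true

	var opts []option.ClientOption
	if gcsEndpoint != "" {
		opts = append(opts, option.WithEndpoint(gcsEndpoint))
	}
	if disableGcsAuth {
		opts = append(opts, option.WithoutAuthentication())
	}

	// bucket.NewClient forwards the options to the Google Cloud Storage client.
	client, err := bucket.NewClient(context.Background(), opts...)
	if err != nil {
		log.Fatalf("could not create storage client: %s", err)
	}
	defer client.Close()
}
```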

integration/tar.go (new file)

@@ -0,0 +1,58 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"compress/gzip"
"fmt"
"io/ioutil"
"log"
"os"
"time"
"github.com/GoogleContainerTools/kaniko/pkg/util"
)
// CreateIntegrationTarball will take the contents of the integration directory and write
// them to a tarball in a temmporary dir. It will return the path to the tarball.
func CreateIntegrationTarball() (string, error) {
log.Println("Creating tarball of integration test files to use as build context")
dir, err := os.Getwd()
if err != nil {
return "nil", fmt.Errorf("Failed find path to integration dir: %w", err)
}
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err)
}
contextFilePath := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano())
file, err := os.OpenFile(contextFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return "", err
}
gzipWriter := gzip.NewWriter(file)
defer gzipWriter.Close()
err = util.CreateTarballOfDirectory(dir, file)
if err != nil {
return "", fmt.Errorf("creating tarball of integration dir: %w", err)
}
return contextFilePath, nil
}


@@ -17,15 +17,15 @@ limitations under the License.
package buildcontext
import (
+"fmt"
"io"
"os"
"path/filepath"
-"strings"
-"cloud.google.com/go/storage"
kConfig "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
+"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
)
@@ -36,27 +36,24 @@ type GCS struct {
}
func (g *GCS) UnpackTarFromBuildContext() (string, error) {
-bucket, item := util.GetBucketAndItem(g.context)
-return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucket, item, kConfig.BuildContextDir)
+bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(g.context)
+if err != nil {
+return "", fmt.Errorf("getting bucketname and filepath from context: %w", err)
+}
+return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucketName, filepath, kConfig.BuildContextDir)
}
func UploadToBucket(r io.Reader, dest string) error {
ctx := context.Background()
-context := strings.SplitAfter(dest, "://")[1]
-bucketName, item := util.GetBucketAndItem(context)
-client, err := storage.NewClient(ctx)
+bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(dest)
+if err != nil {
+return fmt.Errorf("getting bucketname and filepath from dest: %w", err)
+}
+client, err := bucket.NewClient(ctx)
if err != nil {
return err
}
-bucket := client.Bucket(bucketName)
-w := bucket.Object(item).NewWriter(ctx)
-if _, err := io.Copy(w, r); err != nil {
-return err
-}
-if err := w.Close(); err != nil {
-return err
-}
-return nil
+return bucket.Upload(ctx, bucketName, filepath, r, client)
}
// unpackTarFromGCSBucket unpacks the context.tar.gz file in the given bucket to the given directory
@@ -77,15 +74,14 @@ func unpackTarFromGCSBucket(bucketName, item, directory string) error {
// getTarFromBucket gets context.tar.gz from the GCS bucket and saves it to the filesystem
// It returns the path to the tar file
-func getTarFromBucket(bucketName, item, directory string) (string, error) {
+func getTarFromBucket(bucketName, filepathInBucket, directory string) (string, error) {
ctx := context.Background()
-client, err := storage.NewClient(ctx)
+client, err := bucket.NewClient(ctx)
if err != nil {
return "", err
}
-bucket := client.Bucket(bucketName)
// Get the tarfile context.tar.gz from the GCS bucket, and save it to a tar object
-reader, err := bucket.Object(item).NewReader(ctx)
+reader, err := bucket.ReadCloser(ctx, bucketName, filepathInBucket, client)
if err != nil {
return "", err
}


@@ -17,6 +17,7 @@ limitations under the License.
package buildcontext
import (
+"fmt"
"os"
"path/filepath"
"strings"
@@ -24,6 +25,7 @@ import (
kConfig "github.com/GoogleContainerTools/kaniko/pkg/config"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
+"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
@@ -37,7 +39,11 @@ type S3 struct {
// UnpackTarFromBuildContext download and untar a file from s3
func (s *S3) UnpackTarFromBuildContext() (string, error) {
-bucket, item := util.GetBucketAndItem(s.context)
+bucket, item, err := bucket.GetNameAndFilepathFromURI(s.context)
+if err != nil {
+return "", fmt.Errorf("getting bucketname and filepath from context: %w", err)
+}
option := session.Options{
SharedConfigState: session.SharedConfigEnable,
}
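Because the shared helper only parses the URI, the same call serves the S3 build context as well. A small illustrative check follows; the bucket and object names are made up.

```go
package main

import (
	"fmt"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
)

func main() {
	// Hypothetical S3 build-context URI; only the host and path are inspected.
	name, key, err := bucket.GetNameAndFilepathFromURI("s3://my-context-bucket/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name, key) // my-context-bucket context.tar.gz
}
```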


@@ -0,0 +1,88 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bucket
import (
"context"
"fmt"
"io"
"net/url"
"strings"
"cloud.google.com/go/storage"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"google.golang.org/api/option"
)
// Upload uploads everything from Reader to the bucket under path
func Upload(ctx context.Context, bucketName string, path string, r io.Reader, client *storage.Client) error {
bucket := client.Bucket(bucketName)
w := bucket.Object(path).NewWriter(ctx)
if _, err := io.Copy(w, r); err != nil {
return err
}
if err := w.Close(); err != nil {
return err
}
return nil
}
// Delete will remove the content at path. path should be the full path
// to a file in GCS.
func Delete(ctx context.Context, bucketName string, path string, client *storage.Client) error {
err := client.Bucket(bucketName).Object(path).Delete(ctx)
if err != nil {
return fmt.Errorf("failed to delete file at %s in gcs bucket %v: %w", path, bucketName, err)
}
return err
}
// ReadCloser will create io.ReadCloser for the specified bucket and path
func ReadCloser(ctx context.Context, bucketName string, path string, client *storage.Client) (io.ReadCloser, error) {
bucket := client.Bucket(bucketName)
r, err := bucket.Object(path).NewReader(ctx)
if err != nil {
return nil, err
}
return r, nil
}
// NewClient returns a new google storage client
func NewClient(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error) {
client, err := storage.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
return client, err
}
// GetNameAndFilepathFromURI returns the bucketname and the path to the item inside.
// Will error if provided URI is not a valid URL.
// If the filepath is empty, returns the contextTar filename
func GetNameAndFilepathFromURI(bucketURI string) (bucketName string, path string, err error) {
url, err := url.Parse(bucketURI)
if err != nil {
return "", "", err
}
bucketName = url.Host
// remove leading slash
filePath := strings.TrimPrefix(url.Path, "/")
if filePath == "" {
filePath = constants.ContextTar
}
return bucketName, filePath, nil
}
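As a reference for callers of the new package, here is a minimal sketch of the upload/cleanup round trip using the helpers above; the bucket URI and local file name are placeholders.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/GoogleContainerTools/kaniko/pkg/util/bucket"
)

func main() {
	ctx := context.Background()

	// Default credentials; local runs would add option.WithEndpoint / option.WithoutAuthentication.
	client, err := bucket.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Split a gs:// URI into bucket name and object path.
	name, objectPath, err := bucket.GetNameAndFilepathFromURI("gs://my-test-bucket/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}

	// Upload a local tarball, then remove it from the bucket again.
	f, err := os.Open("context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := bucket.Upload(ctx, name, objectPath, f, client); err != nil {
		log.Fatal(err)
	}
	if err := bucket.Delete(ctx, name, objectPath, client); err != nil {
		log.Fatal(err)
	}
}
```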


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package util
+package bucket
import (
"testing"
@@ -29,28 +29,29 @@ func Test_GetBucketAndItem(t *testing.T) {
context string
expectedBucket string
expectedItem string
+expectedErr bool
}{
{
name: "three slashes",
-context: "test1/test2/test3",
+context: "gs://test1/test2/test3",
expectedBucket: "test1",
expectedItem: "test2/test3",
},
{
name: "two slashes",
-context: "test1/test2",
+context: "gs://test1/test2",
expectedBucket: "test1",
expectedItem: "test2",
},
{
name: "one slash",
-context: "test1/",
+context: "gs://test1/",
expectedBucket: "test1",
expectedItem: constants.ContextTar,
},
{
name: "zero slash",
-context: "test1",
+context: "gs://test1",
expectedBucket: "test1",
expectedItem: constants.ContextTar,
},
@@ -58,7 +59,8 @@ func Test_GetBucketAndItem(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-gotBucket, gotItem := GetBucketAndItem(test.context)
+gotBucket, gotItem, err := GetNameAndFilepathFromURI(test.context)
+testutil.CheckError(t, test.expectedErr, err)
testutil.CheckDeepEqual(t, test.expectedBucket, gotBucket)
testutil.CheckDeepEqual(t, test.expectedItem, gotItem)
})


@@ -1,31 +0,0 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
)
func GetBucketAndItem(context string) (string, string) {
split := strings.SplitN(context, "/", 2)
if len(split) == 2 && split[1] != "" {
return split[0], split[1]
}
return split[0], constants.ContextTar
}


@@ -22,6 +22,7 @@ import (
"compress/gzip"
"fmt"
"io"
+"io/fs"
"io/ioutil"
"os"
"path/filepath"
@@ -50,6 +51,26 @@ func NewTar(f io.Writer) Tar {
}
}
+func CreateTarballOfDirectory(pathToDir string, f io.Writer) error {
+if !filepath.IsAbs(pathToDir) {
+return errors.New("pathToDir is not absolute")
+}
+tarWriter := NewTar(f)
+defer tarWriter.Close()
+walkFn := func(path string, d fs.DirEntry, err error) error {
+if err != nil {
+return err
+}
+if !filepath.IsAbs(path) {
+return fmt.Errorf("path %v is not absolute, cant read file", path)
+}
+return tarWriter.AddFileToTar(path)
+}
+return filepath.WalkDir(pathToDir, walkFn)
+}
// Close will close any open streams used by Tar.
func (t *Tar) Close() {
t.w.Close()
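A minimal usage sketch for the new helper, wrapping the output in gzip the way the integration build context is packaged; both paths below are placeholders, and the source directory must be absolute.

```go
package main

import (
	"compress/gzip"
	"log"
	"os"

	"github.com/GoogleContainerTools/kaniko/pkg/util"
)

func main() {
	// Destination archive (placeholder path).
	out, err := os.Create("/tmp/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Compress the tar stream as it is written.
	gz := gzip.NewWriter(out)
	defer gz.Close()

	// CreateTarballOfDirectory rejects relative source paths.
	if err := util.CreateTarballOfDirectory("/path/to/source", gz); err != nil {
		log.Fatal(err)
	}
}
```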


@@ -20,6 +20,7 @@ import (
"archive/tar"
"bytes"
"compress/gzip"
+"fmt"
"io"
"io/ioutil"
"os"
@@ -129,3 +130,46 @@ func createTar(testdir string, writer io.Writer) error {
}
return nil
}
func Test_CreateTarballOfDirectory(t *testing.T) {
tmpDir := t.TempDir()
wantErr := false
createFilesInTempDir(t, tmpDir)
f := &bytes.Buffer{}
err := CreateTarballOfDirectory(tmpDir, f)
testutil.CheckError(t, wantErr, err)
extracedFilesDir := filepath.Join(tmpDir, "extracted")
err = os.Mkdir(extracedFilesDir, 0755)
if err != nil {
t.Error(err)
return
}
files, err := UnTar(f, extracedFilesDir)
testutil.CheckError(t, wantErr, err)
for _, filePath := range files {
fileInfo, err := os.Lstat(filePath)
testutil.CheckError(t, wantErr, err)
if fileInfo.IsDir() {
// skip directory
continue
}
file, err := os.Open(filePath)
testutil.CheckError(t, wantErr, err)
body, err := io.ReadAll(file)
testutil.CheckError(t, wantErr, err)
index := filepath.Base(filePath)
testutil.CheckDeepEqual(t, string(body), fmt.Sprintf("hello from %s\n", index))
}
}
func createFilesInTempDir(t *testing.T, tmpDir string) {
for i := 0; i < 2; i++ {
fName := filepath.Join(tmpDir, fmt.Sprint(i))
content := fmt.Sprintf("hello from %d\n", i)
if err := os.WriteFile(fName, []byte(content), 0666); err != nil {
t.Error(err)
return
}
}
}


@@ -13,9 +13,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set -ex
+set -e
+function start_local_registry {
+docker start registry || docker run --name registry -d -p 5000:5000 registry:2
+}
+# TODO: to get this working, we need a way to override the gcs endpoint of kaniko at runtime
+# If this is done, integration test main includes flags --gcs-endpoint and --disable-gcs-auth
+# to mock the gcs endpoints and upload files to the fake-gcs-server
+function start_fake_gcs_server {
+docker start fake-gcs-server || docker run -d -p 4443:4443 --name fake-gcs-server fsouza/fake-gcs-server -scheme http
+}
-GCS_BUCKET="${GCS_BUCKET:-gs://kaniko-test-bucket}"
IMAGE_REPO="${IMAGE_REPO:-gcr.io/kaniko-test}"
docker version
@@ -23,4 +33,26 @@ docker version
echo "Running integration tests..."
make out/executor
make out/warmer
-go test ./integration/... --bucket "${GCS_BUCKET}" --repo "${IMAGE_REPO}" --timeout 50m "$@"
FLAGS=(
"--timeout=50m"
)
if [[ -n $DOCKERFILE_PATTERN ]]; then
FLAGS+=("--dockerfiles-pattern=$DOCKERFILE_PATTERN")
fi
if [[ -n $LOCAL ]]; then
echo "running in local mode, mocking registry and gcs bucket..."
start_local_registry
IMAGE_REPO="localhost:5000/kaniko-test"
GCS_BUCKET=""
fi
FLAGS+=(
"--bucket=${GCS_BUCKET}"
"--repo=${IMAGE_REPO}"
)
go test ./integration/... "${FLAGS[@]}" "$@"