feat: allow injecting a tar.gz build context through stdin in kaniko

Jordan Goasdoué 2020-03-14 20:44:15 +01:00 committed by goasdoue
parent fda7ed4f74
commit d08a767454
7 changed files with 257 additions and 19 deletions

View File

@ -120,11 +120,18 @@ Right now, kaniko supports these storage solutions:
- S3 Bucket
- Azure Blob Storage
- Local Directory
- Local Tar
- Standard Input
- Git Repository
_Note: the local directory option refers to a directory within the kaniko container.
_Note about Local Directory: this option refers to a directory within the kaniko container.
If you wish to use this option, you will need to mount your build context into the container as a directory._
_Note about Local Tar: this option refers to a compressed tar (`.tar.gz`) file within the kaniko container.
If you wish to use this option, you will need to mount your build context into the container as a file._
_Note about Standard Input: the only format kaniko accepts on Standard Input is a compressed tar (`.tar.gz`)._
If using a GCS or S3 bucket, you will first need to create a compressed tar of your build context and upload it to your bucket.
Once running, kaniko will then download and unpack the compressed tar of the build context before starting the image build.
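If it helps, below is a minimal standalone Go sketch (not part of kaniko) of producing such a compressed tar from a context directory; the directory and output paths are placeholders, and the upload itself is left to your usual tooling (for example `gsutil cp` or `aws s3 cp`).
```go
package main

import (
	"archive/tar"
	"compress/gzip"
	"io"
	"log"
	"os"
	"path/filepath"
)

// createContextTarGz walks srcDir and writes a gzip-compressed tar of its
// regular files to outPath, ready to be uploaded to a GCS or S3 bucket.
func createContextTarGz(srcDir, outPath string) error {
	out, err := os.Create(outPath)
	if err != nil {
		return err
	}
	defer out.Close()

	gw := gzip.NewWriter(out)
	defer gw.Close()
	tw := tar.NewWriter(gw)
	defer tw.Close()

	return filepath.Walk(srcDir, func(path string, info os.FileInfo, err error) error {
		if err != nil || !info.Mode().IsRegular() {
			return err
		}
		rel, err := filepath.Rel(srcDir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		// Store paths relative to the context root, e.g. "Dockerfile".
		hdr.Name = filepath.ToSlash(rel)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

func main() {
	// Placeholder paths: write the archive outside the context directory so
	// it does not end up inside itself.
	if err := createContextTarGz("./build-context", "/tmp/context.tar.gz"); err != nil {
		log.Fatal(err)
	}
}
```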
@ -146,6 +153,7 @@ When running kaniko, use the `--context` flag with the appropriate prefix to specify the location of your build context:
|---------|---------|---------|
| Local Directory | dir://[path to a directory in the kaniko container] | `dir:///workspace` |
| Local Tar Gz | tar://[path to a .tar.gz in the kaniko container] | `tar://path/to/context.tar.gz` |
| Standard Input | tar://stdin | `tar://stdin` |
| GCS Bucket | gs://[bucket name]/[path to .tar.gz] | `gs://kaniko-bucket/path/to/context.tar.gz` |
| S3 Bucket | s3://[bucket name]/[path to .tar.gz] | `s3://kaniko-bucket/path/to/context.tar.gz` |
| Azure Blob Storage| https://[account].[azureblobhostsuffix]/[container]/[path to .tar.gz] | `https://myaccount.blob.core.windows.net/container/path/to/context.tar.gz` |
@ -160,6 +168,20 @@ If you are using Azure Blob Storage for context file, you will need to pass [Azu
### Using Private Git Repository
You can use `Personal Access Tokens` for build contexts from private repositories on [GitHub](https://blog.github.com/2012-09-21-easier-builds-and-deployments-using-git-over-https-and-oauth/).
### Using Standard Input
When running kaniko with a Standard Input build context, you will need to add the docker or kubernetes `-i, --interactive` flag.
Once running, kaniko will read the data from `STDIN` and create the build context as a compressed tar inside the container.
It will then unpack that compressed tar as the build context before starting the image build.
If no data is piped during the interactive run, you will need to send the EOF signal yourself by pressing `Ctrl+D`.
A complete example of interactively running kaniko with `.tar.gz` Standard Input data, using docker:
```shell
echo -e 'FROM alpine \nRUN echo "created from standard input"' > Dockerfile && \
tar -cf - Dockerfile | gzip -9 | docker run \
  --interactive -v $(pwd):/workspace gcr.io/kaniko-project/executor:latest \
  --context tar://stdin \
  --destination=<gcr.io/$project/$image:$tag>
```
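For reference, here is a simplified standalone Go sketch of the stdin handling described above; it only mirrors the behaviour added to `pkg/buildcontext` later in this commit and is not the executor's actual entry point.
```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// Stat stdin: with no pipe attached, stdin is the terminal (a character
	// device), so kaniko refuses to block forever waiting for data.
	fi, err := os.Stdin.Stat()
	if err != nil {
		panic(err)
	}
	if fi.Mode()&os.ModeCharDevice != 0 {
		fmt.Println("no data piped on stdin; add -i/--interactive and pipe a .tar.gz")
		return
	}
	// Otherwise read the whole compressed tar from stdin, as kaniko does
	// before writing it to a file and unpacking it as the build context.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes of build context from stdin\n", len(data))
}
```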
### Running kaniko
There are several different ways to deploy and run kaniko:

View File

@ -32,11 +32,11 @@ import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/pkg/errors"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/pkg/errors"
)
var config *integrationTestConfig

View File

@ -0,0 +1,151 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"compress/gzip"
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"sync"
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/testutil"
)
func TestBuildWithStdin(t *testing.T) {
_, ex, _, _ := runtime.Caller(0)
cwd := filepath.Dir(ex)
testDir := "test_dir"
testDirLongPath := filepath.Join(cwd, testDir)
if err := os.MkdirAll(testDirLongPath, 0750); err != nil {
t.Errorf("Failed to create dir_where_to_extract: %v", err)
}
dockerfile := "Dockerfile_test_stdin"
files := map[string]string{
dockerfile: "FROM debian:9.11\nRUN echo \"hey\"",
}
if err := testutil.SetupFiles(testDir, files); err != nil {
t.Errorf("Failed to setup files %v on %s: %v", files, testDir, err)
}
if err := os.Chdir(testDir); err != nil {
t.Fatalf("Failed to Chdir on %s: %v", testDir, err)
}
tarPath := fmt.Sprintf("%s.tar.gz", dockerfile)
var wg sync.WaitGroup
wg.Add(1)
// Create Tar Gz File with dockerfile inside
go func(wg *sync.WaitGroup) {
defer wg.Done()
tarFile, err := os.Create(tarPath)
if err != nil {
t.Errorf("Failed to create %s: %v", tarPath, err)
}
defer tarFile.Close()
gw := gzip.NewWriter(tarFile)
defer gw.Close()
tw := util.NewTar(gw)
defer tw.Close()
if err := tw.AddFileToTar(dockerfile); err != nil {
t.Errorf("Failed to add %s to %s: %v", dockerfile, tarPath, err)
}
}(&wg)
// Waiting for the Tar Gz file creation to be done before moving on
wg.Wait()
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
"."})...)
_, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Fatalf("can't run %s: %v", dockerCmd.String(), err)
}
// Build with kaniko using Stdin
kanikoImageStdin := GetKanikoImage(config.imageRepo, dockerfile)
tarCmd := exec.Command("tar", "-cf", "-", dockerfile)
gzCmd := exec.Command("gzip", "-9")
dockerRunFlags := []string{"run", "--interactive", "--net=host", "-v", cwd + ":/workspace"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags,
ExecutorImage,
"-f", dockerfile,
"-c", "tar://stdin",
"-d", kanikoImageStdin)
kanikoCmdStdin := exec.Command("docker", dockerRunFlags...)
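// Wire the pipeline: tar's stdout feeds gzip, and gzip's stdout feeds the kaniko container's stdin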
gzCmd.Stdin, err = tarCmd.StdoutPipe()
if err != nil {
t.Fatalf("can't set gzCmd stdin: %v", err)
}
kanikoCmdStdin.Stdin, err = gzCmd.StdoutPipe()
if err != nil {
t.Fatalf("can't set kanikoCmd stdin: %v", err)
}
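// Start the downstream consumers first (kaniko, then gzip), then run tar so the context streams through the pipes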
if err := kanikoCmdStdin.Start(); err != nil {
t.Fatalf("can't start %s: %v", kanikoCmdStdin.String(), err)
}
if err := gzCmd.Start(); err != nil {
t.Fatalf("can't start %s: %v", gzCmd.String(), err)
}
if err := tarCmd.Run(); err != nil {
t.Fatalf("can't start %s: %v", tarCmd.String(), err)
}
if err := gzCmd.Wait(); err != nil {
t.Fatalf("can't wait %s: %v", gzCmd.String(), err)
}
if err := kanikoCmdStdin.Wait(); err != nil {
t.Fatalf("can't wait %s: %v", kanikoCmdStdin.String(), err)
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImageStdin, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImageStdin, dockerImage, kanikoImageStdin)
checkContainerDiffOutput(t, diff, expected)
if err := os.RemoveAll(testDirLongPath); err != nil {
t.Errorf("Failed to remove %s: %v", testDirLongPath, err)
}
}

View File

@ -38,25 +38,27 @@ type BuildContext interface {
// parser
func GetBuildContext(srcContext string) (BuildContext, error) {
split := strings.SplitAfter(srcContext, "://")
prefix := split[0]
context := split[1]
if len(split) > 1 {
prefix := split[0]
context := split[1]
switch prefix {
case constants.GCSBuildContextPrefix:
return &GCS{context: context}, nil
case constants.S3BuildContextPrefix:
return &S3{context: context}, nil
case constants.LocalDirBuildContextPrefix:
return &Dir{context: context}, nil
case constants.GitBuildContextPrefix:
return &Git{context: context}, nil
case constants.HTTPSBuildContextPrefix:
if util.ValidAzureBlobStorageHost(srcContext) {
return &AzureBlob{context: srcContext}, nil
switch prefix {
case constants.GCSBuildContextPrefix:
return &GCS{context: context}, nil
case constants.S3BuildContextPrefix:
return &S3{context: context}, nil
case constants.LocalDirBuildContextPrefix:
return &Dir{context: context}, nil
case constants.GitBuildContextPrefix:
return &Git{context: context}, nil
case constants.HTTPSBuildContextPrefix:
if util.ValidAzureBlobStorageHost(srcContext) {
return &AzureBlob{context: srcContext}, nil
}
return nil, errors.New("url provided for https context is not in a supported format, please use the https url for Azure Blob Storage")
case TarBuildContextPrefix:
return &Tar{context: context}, nil
}
return nil, errors.New("url provided for https context is not in a supported format, please use the https url for Azure Blob Storage")
case TarBuildContextPrefix:
return &Tar{context: context}, nil
}
return nil, errors.New("unknown build context prefix provided, please use one of the following: gs://, dir://, tar://, s3://, git://, https://")
}
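As a rough illustration of how the new prefix is consumed, a hypothetical caller (the real call site is in kaniko's executor and is not part of this diff) might look like the sketch below, assuming the `pkg/buildcontext` import path:
```go
package main

import (
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
)

func main() {
	// "tar://stdin" selects the Tar build context with the literal "stdin" path.
	ctx, err := buildcontext.GetBuildContext("tar://stdin")
	if err != nil {
		log.Fatal(err)
	}
	// For stdin, UnpackTarFromBuildContext first writes the piped .tar.gz into
	// the build context directory, then unpacks it and returns that directory.
	dir, err := ctx.UnpackTarFromBuildContext()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("build context ready at %s", dir)
}
```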

View File

@ -17,11 +17,15 @@ limitations under the License.
package buildcontext
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Tar unifies calls to download and unpack the build context.
@ -35,6 +39,23 @@ func (t *Tar) UnpackTarFromBuildContext() (string, error) {
if err := os.MkdirAll(directory, 0750); err != nil {
return "", errors.Wrap(err, "unpacking tar from build context")
}
if t.context == "stdin" {
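// Stat stdin: an attached terminal is a character device, which means no data is being piped in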
fi, _ := os.Stdin.Stat()
if (fi.Mode() & os.ModeCharDevice) != 0 {
return "", fmt.Errorf("no data found.. don't forget to add the '--interactive, -i' flag")
}
logrus.Infof("To simulate EOF and exit, press 'Ctrl+D'")
// if launched through docker in interactive mode and without piped data
// process will be stuck here until EOF is sent
data, err := util.GetInputFrom(os.Stdin)
if err != nil {
return "", errors.Wrap(err, "fail to get standard input")
}
t.context = filepath.Join(directory, constants.ContextTar)
if err := ioutil.WriteFile(t.context, data, 0644); err != nil {
return "", errors.Wrap(err, "fail to redirect standard input into compressed tar file")
}
}
return directory, util.UnpackCompressedTar(t.context, directory)
}

View File

@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/hex"
"io"
"io/ioutil"
"os"
"runtime"
"strconv"
@ -134,3 +135,12 @@ func currentPlatform() v1.Platform {
Architecture: runtime.GOARCH,
}
}
// GetInputFrom reads all data from the given Reader and returns it
func GetInputFrom(r io.Reader) ([]byte, error) {
output, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
return output, nil
}

pkg/util/util_test.go Normal file
View File

@ -0,0 +1,32 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bufio"
"bytes"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
)
func TestGetInputFrom(t *testing.T) {
validInput := []byte("Valid\n")
validReader := bufio.NewReader(bytes.NewReader(validInput))
validValue, err := GetInputFrom(validReader)
testutil.CheckErrorAndDeepEqual(t, false, err, validInput, validValue)
}