add another snapshotter

parent ec3ca84ad9
commit bea020f34f

@@ -115,16 +115,27 @@ var RootCmd = &cobra.Command{
 		benchmarkFile := os.Getenv("BENCHMARK_FILE")
 		// false is a keyword for integration tests to turn off benchmarking
 		if benchmarkFile != "" && benchmarkFile != "false" {
-			f, err := os.Create(benchmarkFile)
-			if err != nil {
-				logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err)
-			}
-			defer f.Close()
 			s, err := timing.JSON()
 			if err != nil {
 				logrus.Warnf("Unable to write benchmark file: %s", err)
 				return
 			}
-			f.WriteString(s)
+			if strings.HasPrefix(benchmarkFile, "gs://") {
+				logrus.Info("uploading to gcs")
+				if err := buildcontext.UploadToBucket(strings.NewReader(s), benchmarkFile); err != nil {
+					logrus.Infof("Unable to upload %s due to %v", benchmarkFile, err)
+				}
+				logrus.Infof("benchmark file written at %s", benchmarkFile)
+			} else {
+				f, err := os.Create(benchmarkFile)
+				if err != nil {
+					logrus.Warnf("Unable to create benchmarking file %s: %s", benchmarkFile, err)
+					return
+				}
+				defer f.Close()
+				f.WriteString(s)
+				logrus.Infof("benchmark file written at %s", benchmarkFile)
+			}
 		}
 	},
 }

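Note: with this hunk, BENCHMARK_FILE doubles as a destination selector. A minimal sketch of the three cases, assuming a made-up bucket name:

    // Illustrative values only; "some-bucket" is hypothetical.
    os.Setenv("BENCHMARK_FILE", "gs://some-bucket/run1.json") // streamed to GCS via buildcontext.UploadToBucket
    os.Setenv("BENCHMARK_FILE", "/tmp/bench.json")            // written locally with os.Create
    os.Setenv("BENCHMARK_FILE", "false")                      // integration-test sentinel: benchmarking stays off
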
@@ -0,0 +1,12 @@
+steps:
+- name: 'gcr.io/kaniko-project/executor:perf-latest'
+  args:
+  - --build-arg=NUM=${_COUNT}
+  - --no-push
+  - --snapshotMode=redo
+  env:
+  - 'BENCHMARK_FILE=gs://tejal-test/redo_gcb/benchmark_file_${_COUNT}'
+  timeout: 2400s
+timeout: 2400s
+substitutions:
+  _COUNT: "10000" # default value

@@ -17,7 +17,8 @@
 mkdir /workdir
 
 i=1
-while [ $i -le $1 ]
+targetCnt=$(( $1 + 0 ))
+while [ $i -le $targetCnt ]
 do
     cat context.txt > /workdir/somefile$i
     i=$(( $i + 1 ))

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"strconv"
 	"sync"

@@ -32,6 +33,7 @@ type result struct {
 	totalBuildTime float64
 	resolvingFiles float64
 	walkingFiles   float64
+	hashingFiles   float64
 }
 
 func TestSnapshotBenchmark(t *testing.T) {

@@ -44,7 +46,7 @@ func TestSnapshotBenchmark(t *testing.T) {
 	}
 	contextDir := filepath.Join(cwd, "benchmark_fs")
 
-	nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000, 800000}
+	nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000}
 
 	var timeMap sync.Map
 	var wg sync.WaitGroup

@@ -53,7 +55,7 @@ func TestSnapshotBenchmark(t *testing.T) {
 		wg.Add(1)
 		var err error
 		go func(num int, err *error) {
-			dockerfile := "Dockerfile_fs_benchmark"
+			dockerfile := "Dockerfile"
 			kanikoImage := fmt.Sprintf("%s_%d", GetKanikoImage(config.imageRepo, dockerfile), num)
 			buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)}
 			var benchmarkDir string

@@ -106,5 +108,71 @@ func newResult(t *testing.T, f string) result {
 	if c, ok := current["Total Build Time"]; ok {
 		r.totalBuildTime = c.Seconds()
 	}
+	if c, ok := current["Hashing files"]; ok {
+		r.hashingFiles = c.Seconds()
+	}
+	fmt.Println(r)
 	return r
 }
+
+func TestSnapshotBenchmarkGcloud(t *testing.T) {
+	if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err != nil || !b {
+		t.SkipNow()
+	}
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	contextDir := filepath.Join(cwd, "benchmark_fs")
+
+	nums := []int{10000, 50000, 100000, 200000, 300000, 500000, 700000}
+
+	var wg sync.WaitGroup
+	fmt.Println("Number of Files,Total Build Time,Walking Filesystem,Resolving Files,Hashing Files")
+	for _, num := range nums {
+		t.Run(fmt.Sprintf("test_benchmark_%d", num), func(t *testing.T) {
+			wg.Add(1)
+			var err error
+			go func(num int, err error) {
+				dir, err := runInGcloud(contextDir, num)
+				if err != nil {
+					return
+				}
+				r := newResult(t, filepath.Join(dir, "results"))
+				fmt.Println(fmt.Sprintf("%d,%f,%f,%f,%f", num, r.totalBuildTime, r.walkingFiles, r.resolvingFiles, r.hashingFiles))
+				wg.Done()
+				defer os.Remove(dir)
+				defer os.Chdir(cwd)
+			}(num, err)
+			if err != nil {
+				t.Errorf("could not run benchmark results for num %d due to %s", num, err)
+			}
+		})
+	}
+	wg.Wait()
+}
+
+func runInGcloud(dir string, num int) (string, error) {
+	os.Chdir(dir)
+	cmd := exec.Command("gcloud", "builds",
+		"submit", "--config=cloudbuild.yaml",
+		fmt.Sprintf("--substitutions=_COUNT=%d", num))
+	_, err := RunCommandWithoutTest(cmd)
+	if err != nil {
+		return "", err
+	}
+
+	// copy the benchmark file from GCS into a temp dir and return that dir
+	tmpDir, err := ioutil.TempDir("", fmt.Sprintf("%d", num))
+	if err != nil {
+		return "", err
+	}
+	src := fmt.Sprintf("gs://tejal-test/redo_gcb/benchmark_file_%d", num)
+	dest := filepath.Join(tmpDir, "results")
+	copyCommand := exec.Command("gsutil", "cp", src, dest)
+	_, err = RunCommandWithoutTest(copyCommand)
+	if err != nil {
+		return "", fmt.Errorf("failed to download file from GCS bucket %s: %s", src, err)
+	}
+	return tmpDir, nil
+}

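Note: in TestSnapshotBenchmarkGcloud as written, err is handed to the goroutine by value, so a failure inside runInGcloud never reaches the t.Errorf check. A sketch of the pointer-based variant that the sibling TestSnapshotBenchmark already uses (names assumed from the hunk above); the err check would still need to move after wg.Wait() to avoid racing the goroutine:

    var err error
    wg.Add(1)
    go func(num int, err *error) {
        defer wg.Done()
        dir, e := runInGcloud(contextDir, num)
        if e != nil {
            *err = e // visible to the caller once the goroutine finishes
            return
        }
        defer os.Remove(dir)
        defer os.Chdir(cwd)
        r := newResult(t, filepath.Join(dir, "results"))
        fmt.Printf("%d,%f,%f,%f,%f\n", num, r.totalBuildTime, r.walkingFiles, r.resolvingFiles, r.hashingFiles)
    }(num, &err)
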
@@ -17,8 +17,10 @@ limitations under the License.
 package buildcontext
 
 import (
+	"io"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"cloud.google.com/go/storage"
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"

@@ -37,6 +39,25 @@ func (g *GCS) UnpackTarFromBuildContext() (string, error) {
 	return constants.BuildContextDir, unpackTarFromGCSBucket(bucket, item, constants.BuildContextDir)
 }
 
+func UploadToBucket(r io.Reader, dest string) error {
+	ctx := context.Background()
+	context := strings.SplitAfter(dest, "://")[1]
+	bucketName, item := util.GetBucketAndItem(context)
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return err
+	}
+	bucket := client.Bucket(bucketName)
+	w := bucket.Object(item).NewWriter(ctx)
+	if _, err := io.Copy(w, r); err != nil {
+		return err
+	}
+	if err := w.Close(); err != nil {
+		return err
+	}
+	return nil
+}
+
 // unpackTarFromGCSBucket unpacks the context.tar.gz file in the given bucket to the given directory
 func unpackTarFromGCSBucket(bucketName, item, directory string) error {
 	// Get the tar from the bucket

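Note: UploadToBucket strips the gs:// scheme with strings.SplitAfter, resolves the bucket and object names, and streams the reader into a GCS object writer. A hypothetical call, mirroring how the executor's root command now uses it (the bucket name is made up):

    report := strings.NewReader(`{"Total Build Time": "42s"}`)
    if err := buildcontext.UploadToBucket(report, "gs://some-bucket/benchmarks/run1.json"); err != nil {
        logrus.Warnf("Unable to upload benchmark: %v", err)
    }
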
@@ -35,7 +35,6 @@ import (
 // output set.
 // * Add all ancestors of each path to the output set.
 func ResolvePaths(paths []string, wl []util.IgnoreListEntry) (pathsToAdd []string, err error) {
-	logrus.Infof("Resolving %d paths", len(paths))
 	logrus.Tracef("Resolving paths %s", paths)
 
 	fileSet := make(map[string]bool)

@@ -126,7 +126,7 @@ func RedoHasher() func(string) (string, error) {
 		}
 		h.Write([]byte(fi.Mode().String()))
 		h.Write([]byte(fi.ModTime().String()))
-		h.Write([]byte(strconv.FormatInt(fi.Size(), 64)))
+		h.Write([]byte(strconv.FormatInt(fi.Size(), 16)))
 		h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Uid), 36)))
 		h.Write([]byte(","))
 		h.Write([]byte(strconv.FormatUint(uint64(fi.Sys().(*syscall.Stat_t).Gid), 36)))

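Note: the old call passed 64 as the base, but strconv.FormatInt only accepts bases 2 through 36 and panics otherwise, so the redo hasher would crash on the first file it hashed; 16 is a valid base. For instance:

    fmt.Println(strconv.FormatInt(1024, 16)) // "400"
    fmt.Println(strconv.FormatInt(1024, 36)) // "sg" (36 is the largest legal base)
    // strconv.FormatInt(1024, 64) panics: strconv: illegal AppendInt/FormatInt base
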
@@ -136,7 +136,6 @@ func RedoHasher() func(string) (string, error) {
 	return hasher
 }
 
-
 // SHA256 returns the shasum of the contents of r
 func SHA256(r io.Reader) (string, error) {
 	hasher := sha256.New()