Renamed to `--oci-layout-path` and added a unit test.
commit 11f3b791cd (parent 7949d0de1d)
				|  | @ -43,8 +43,8 @@ _If you are interested in contributing to kaniko, see [DEVELOPMENT.md](DEVELOPME | |||
|     - [--digest-file](#--digest-file) | ||||
|     - [--insecure](#--insecure) | ||||
|     - [--insecure-pull](#--insecure-pull) | ||||
|     - [--layout-path](#--layout-path) | ||||
|     - [--no-push](#--no-push) | ||||
|     - [--oci-layout-path](#--oci-layout-path) | ||||
|     - [--reproducible](#--reproducible) | ||||
|     - [--single-snapshot](#--single-snapshot) | ||||
|     - [--snapshotMode](#--snapshotmode) | ||||
|  | @ -375,7 +375,7 @@ will write the digest to that file, which is picked up by | |||
| Kubernetes automatically as the `{{.state.terminated.message}}` | ||||
| of the container. | ||||
| 
 | ||||
| #### --layout-path | ||||
| #### --oci-layout-path | ||||
| 
 | ||||
| Set this flag to specify a directory in the container where the OCI image | ||||
| layout of a built image will be placed. This can be used to automatically | ||||
|  | @ -385,6 +385,9 @@ For example, to surface the image digest built in a | |||
| [Tekton task](https://github.com/tektoncd/pipeline/blob/v0.6.0/docs/resources.md#surfacing-the-image-digest-built-in-a-task), | ||||
| this flag should be set to match the image resource `outputImageDir`. | ||||
| 
 | ||||
| _Note: Depending on the built image, the media type of the image manifest might be either | ||||
| `application/vnd.oci.image.manifest.v1+json` or `application/vnd.docker.distribution.manifest.v2+json``._ | ||||
| 
 | ||||
| #### --insecure-registry | ||||
| 
 | ||||
| Set this flag to use plain HTTP requests when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production! | ||||
|  |  | |||
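
Since this section points at the Tekton image resource's `outputImageDir`, here is a minimal sketch of how a downstream consumer could read the built image's digest back out of that directory with go-containerregistry. The directory argument is an assumption standing in for whatever was passed to `--oci-layout-path`; this snippet is illustrative and not part of the commit.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/google/go-containerregistry/pkg/v1/layout"
)

func main() {
	// Directory that kaniko was pointed at via --oci-layout-path,
	// e.g. the Tekton image resource's outputImageDir.
	dir := os.Args[1]

	// Load the image index that sits at the root of the OCI layout.
	index, err := layout.ImageIndexFromPath(dir)
	if err != nil {
		log.Fatalf("reading OCI layout: %v", err)
	}

	manifest, err := index.IndexManifest()
	if err != nil {
		log.Fatalf("reading index manifest: %v", err)
	}

	// Each descriptor in the index refers to one built image; print its digest.
	for _, desc := range manifest.Manifests {
		fmt.Println(desc.Digest.String())
	}
}
```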
|  | @ -129,7 +129,7 @@ func addKanikoOptionsFlags(cmd *cobra.Command) { | |||
| 	RootCmd.PersistentFlags().StringVarP(&opts.CacheRepo, "cache-repo", "", "", "Specify a repository to use as a cache, otherwise one will be inferred from the destination provided") | ||||
| 	RootCmd.PersistentFlags().StringVarP(&opts.CacheDir, "cache-dir", "", "/cache", "Specify a local directory to use as a cache.") | ||||
| 	RootCmd.PersistentFlags().StringVarP(&opts.DigestFile, "digest-file", "", "", "Specify a file to save the digest of the built image to.") | ||||
| 	RootCmd.PersistentFlags().StringVarP(&opts.LayoutPath, "layout-path", "", "", "Path to save the OCI image spec of the built image.") | ||||
| 	RootCmd.PersistentFlags().StringVarP(&opts.OCILayoutPath, "oci-layout-path", "", "", "Path to save the OCI image layout of the built image.") | ||||
| 	RootCmd.PersistentFlags().BoolVarP(&opts.Cache, "cache", "", false, "Use cache when building image") | ||||
| 	RootCmd.PersistentFlags().BoolVarP(&opts.Cleanup, "cleanup", "", false, "Clean the filesystem at the end") | ||||
| 	RootCmd.PersistentFlags().DurationVarP(&opts.CacheTTL, "cache-ttl", "", time.Hour*336, "Cache timeout in hours. Defaults to two weeks.") | ||||
|  |  | |||
|  | @ -37,7 +37,7 @@ type KanikoOptions struct { | |||
| 	Target                  string | ||||
| 	CacheRepo               string | ||||
| 	DigestFile              string | ||||
| 	LayoutPath              string | ||||
| 	OCILayoutPath           string | ||||
| 	Destinations            multiArg | ||||
| 	BuildArgs               multiArg | ||||
| 	Insecure                bool | ||||
|  |  | |||
|  | @ -102,8 +102,8 @@ func DoPush(image v1.Image, opts *config.KanikoOptions) error { | |||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if opts.LayoutPath != "" { | ||||
| 		path, err := layout.Write(opts.LayoutPath, empty.Index) | ||||
| 	if opts.OCILayoutPath != "" { | ||||
| 		path, err := layout.Write(opts.OCILayoutPath, empty.Index) | ||||
| 		if err != nil { | ||||
| 			return errors.Wrap(err, "writing empty layout") | ||||
| 		} | ||||
|  |  | |||
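
The hunk above only shows the empty index being written. Below is a minimal sketch of the rest of the flow, assuming the built image is then appended to that layout via go-containerregistry's `layout.Path.AppendImage`; this is an assumption about the surrounding code, not a verbatim excerpt from kaniko.

```go
// Sketch of writing a built image into an OCI layout directory.
// opts.OCILayoutPath and image come from the surrounding DoPush function.
if opts.OCILayoutPath != "" {
	// Create the layout, seeded with an empty image index.
	path, err := layout.Write(opts.OCILayoutPath, empty.Index)
	if err != nil {
		return errors.Wrap(err, "writing empty layout")
	}
	// Append the built image; this writes its blobs and adds a
	// descriptor for it to the layout's index.json.
	if err := path.AppendImage(image); err != nil {
		return errors.Wrap(err, "appending image to layout")
	}
}
```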
|  | @ -23,7 +23,11 @@ import ( | |||
| 	"os" | ||||
| 	"testing" | ||||
| 
 | ||||
| 	"github.com/GoogleContainerTools/kaniko/pkg/config" | ||||
| 	"github.com/GoogleContainerTools/kaniko/testutil" | ||||
| 	"github.com/google/go-containerregistry/pkg/v1/layout" | ||||
| 	"github.com/google/go-containerregistry/pkg/v1/random" | ||||
| 	"github.com/google/go-containerregistry/pkg/v1/validate" | ||||
| ) | ||||
| 
 | ||||
| func TestHeaderAdded(t *testing.T) { | ||||
|  | @ -69,3 +73,39 @@ func (m *mockRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { | |||
| 	ua := r.UserAgent() | ||||
| 	return &http.Response{Body: ioutil.NopCloser(bytes.NewBufferString(ua))}, nil | ||||
| } | ||||
| 
 | ||||
| func Test_OCILayoutPath(t *testing.T) { | ||||
| 	tmpDir, err := ioutil.TempDir("", "") | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("could not create temp dir: %s", err) | ||||
| 	} | ||||
| 	defer os.RemoveAll(tmpDir) | ||||
| 
 | ||||
| 	image, err := random.Image(1024, 4) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("could not create image: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	digest, err := image.Digest() | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("could not get image digest: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	opts := config.KanikoOptions{ | ||||
| 		NoPush:        true, | ||||
| 		OCILayoutPath: tmpDir, | ||||
| 	} | ||||
| 
 | ||||
| 	if err := DoPush(image, &opts); err != nil { | ||||
| 		t.Fatalf("could not push image: %s", err) | ||||
| 	} | ||||
| 
 | ||||
| 	index, err := layout.ImageIndexFromPath(tmpDir) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("could not get index from layout: %s", err) | ||||
| 	} | ||||
| 	testutil.CheckError(t, false, validate.Index(index)) | ||||
| 
 | ||||
| 	got, err := index.Image(digest) | ||||
| 	testutil.CheckErrorAndDeepEqual(t, false, err, image, got) | ||||
| } | ||||
|  |  | |||
							
								
								
									
vendor/github.com/google/go-containerregistry/pkg/v1/validate/doc.go (new vendored file, 16 lines)
							|  | @ -0,0 +1,16 @@ | |||
| // Copyright 2018 Google LLC All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package validate provides methods for validating image correctness.
 | ||||
| package validate | ||||
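
The vendored `validate` package added here is what the new unit test relies on. As a rough, assumed usage sketch (the layout path is a placeholder, not taken from this commit), validating an in-memory image and an on-disk OCI layout looks like this:

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/v1/layout"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/validate"
)

func main() {
	// Validate a randomly generated image: checks config, layer, and manifest invariants.
	img, err := random.Image(1024, 4)
	if err != nil {
		log.Fatal(err)
	}
	if err := validate.Image(img); err != nil {
		log.Fatalf("image failed validation: %v", err)
	}

	// Validate an on-disk OCI layout by loading its index first.
	// "/path/to/layout" is a placeholder for a real --oci-layout-path directory.
	idx, err := layout.ImageIndexFromPath("/path/to/layout")
	if err != nil {
		log.Fatal(err)
	}
	if err := validate.Index(idx); err != nil {
		log.Fatalf("index failed validation: %v", err)
	}
}
```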
							
								
								
									
vendor/github.com/google/go-containerregistry/pkg/v1/validate/image.go (new vendored file, 297 lines)
							|  | @ -0,0 +1,297 @@ | |||
| // Copyright 2018 Google LLC All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package validate | ||||
| 
 | ||||
| import ( | ||||
| 	"archive/tar" | ||||
| 	"bytes" | ||||
| 	"compress/gzip" | ||||
| 	"crypto/sha256" | ||||
| 	"encoding/hex" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/google/go-cmp/cmp" | ||||
| 	v1 "github.com/google/go-containerregistry/pkg/v1" | ||||
| ) | ||||
| 
 | ||||
| // Image validates that img does not violate any invariants of the image format.
 | ||||
| func Image(img v1.Image) error { | ||||
| 	errs := []string{} | ||||
| 	if err := validateLayers(img); err != nil { | ||||
| 		errs = append(errs, fmt.Sprintf("validating layers: %v", err)) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := validateConfig(img); err != nil { | ||||
| 		errs = append(errs, fmt.Sprintf("validating config: %v", err)) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := validateManifest(img); err != nil { | ||||
| 		errs = append(errs, fmt.Sprintf("validating manifest: %v", err)) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n\n")) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func validateConfig(img v1.Image) error { | ||||
| 	cn, err := img.ConfigName() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	rc, err := img.RawConfigFile() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	hash, size, err := v1.SHA256(bytes.NewReader(rc)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := img.Manifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	cf, err := img.ConfigFile() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	pcf, err := v1.ParseConfigFile(bytes.NewReader(rc)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	errs := []string{} | ||||
| 	if cn != hash { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched config digest: ConfigName()=%s, SHA256(RawConfigFile())=%s", cn, hash)) | ||||
| 	} | ||||
| 
 | ||||
| 	if want, got := m.Config.Size, size; want != got { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched config size: Manifest.Config.Size()=%d, len(RawConfigFile())=%d", want, got)) | ||||
| 	} | ||||
| 
 | ||||
| 	if diff := cmp.Diff(pcf, cf); diff != "" { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched config content: (-ParseConfigFile(RawConfigFile()) +ConfigFile()) %s", diff)) | ||||
| 	} | ||||
| 
 | ||||
| 	if cf.RootFS.Type != "layers" { | ||||
| 		errs = append(errs, fmt.Sprintf("invalid ConfigFile.RootFS.Type: %q != %q", cf.RootFS.Type, "layers")) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func validateLayers(img v1.Image) error { | ||||
| 	layers, err := img.Layers() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	digests := []v1.Hash{} | ||||
| 	diffids := []v1.Hash{} | ||||
| 	sizes := []int64{} | ||||
| 	for _, layer := range layers { | ||||
| 		// TODO: Test layer.Uncompressed.
 | ||||
| 		compressed, err := layer.Compressed() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		// Keep track of compressed digest.
 | ||||
| 		digester := sha256.New() | ||||
| 		// Everything read from compressed is written to digester to compute digest.
 | ||||
| 		hashCompressed := io.TeeReader(compressed, digester) | ||||
| 
 | ||||
| 		// Call io.Copy to write from the layer Reader through to the tarReader on
 | ||||
| 		// the other side of the pipe.
 | ||||
| 		pr, pw := io.Pipe() | ||||
| 		var size int64 | ||||
| 		go func() { | ||||
| 			n, err := io.Copy(pw, hashCompressed) | ||||
| 			if err != nil { | ||||
| 				pw.CloseWithError(err) | ||||
| 				return | ||||
| 			} | ||||
| 			size = n | ||||
| 
 | ||||
| 			// Now close the compressed reader, to flush the gzip stream
 | ||||
| 			// and calculate digest/diffID/size. This will cause pr to
 | ||||
| 			// return EOF which will cause readers of the Compressed stream
 | ||||
| 			// to finish reading.
 | ||||
| 			pw.CloseWithError(compressed.Close()) | ||||
| 		}() | ||||
| 
 | ||||
| 		// Read the bytes through gzip.Reader to compute the DiffID.
 | ||||
| 		uncompressed, err := gzip.NewReader(pr) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		diffider := sha256.New() | ||||
| 		hashUncompressed := io.TeeReader(uncompressed, diffider) | ||||
| 
 | ||||
| 		// Ensure there aren't duplicate file paths.
 | ||||
| 		tarReader := tar.NewReader(hashUncompressed) | ||||
| 		files := make(map[string]struct{}) | ||||
| 		for { | ||||
| 			hdr, err := tarReader.Next() | ||||
| 			if err == io.EOF { | ||||
| 				break | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if _, ok := files[hdr.Name]; ok { | ||||
| 				return fmt.Errorf("duplicate file path: %s", hdr.Name) | ||||
| 			} | ||||
| 			files[hdr.Name] = struct{}{} | ||||
| 		} | ||||
| 
 | ||||
| 		// Discard any trailing padding that the tar.Reader doesn't consume.
 | ||||
| 		if _, err := io.Copy(ioutil.Discard, hashUncompressed); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if err := uncompressed.Close(); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		digest := v1.Hash{ | ||||
| 			Algorithm: "sha256", | ||||
| 			Hex:       hex.EncodeToString(digester.Sum(make([]byte, 0, digester.Size()))), | ||||
| 		} | ||||
| 
 | ||||
| 		diffid := v1.Hash{ | ||||
| 			Algorithm: "sha256", | ||||
| 			Hex:       hex.EncodeToString(diffider.Sum(make([]byte, 0, diffider.Size()))), | ||||
| 		} | ||||
| 
 | ||||
| 		// Compute all of these first before we call Config() and Manifest() to allow
 | ||||
| 		// for lazy access e.g. for stream.Layer.
 | ||||
| 		digests = append(digests, digest) | ||||
| 		diffids = append(diffids, diffid) | ||||
| 		sizes = append(sizes, size) | ||||
| 	} | ||||
| 
 | ||||
| 	cf, err := img.ConfigFile() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := img.Manifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	errs := []string{} | ||||
| 	for i, layer := range layers { | ||||
| 		digest, err := layer.Digest() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		diffid, err := layer.DiffID() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		size, err := layer.Size() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		if digest != digests[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] digest: Digest()=%s, SHA256(Compressed())=%s", i, digest, digests[i])) | ||||
| 		} | ||||
| 
 | ||||
| 		if m.Layers[i].Digest != digests[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] digest: Manifest.Layers[%d].Digest=%s, SHA256(Compressed())=%s", i, i, m.Layers[i].Digest, digests[i])) | ||||
| 		} | ||||
| 
 | ||||
| 		if diffid != diffids[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] diffid: DiffID()=%s, SHA256(Gunzip(Compressed()))=%s", i, diffid, diffids[i])) | ||||
| 		} | ||||
| 
 | ||||
| 		if cf.RootFS.DiffIDs[i] != diffids[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] diffid: ConfigFile.RootFS.DiffIDs[%d]=%s, SHA256(Gunzip(Compressed()))=%s", i, i, cf.RootFS.DiffIDs[i], diffids[i])) | ||||
| 		} | ||||
| 
 | ||||
| 		if size != sizes[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] size: Size()=%d, len(Compressed())=%d", i, size, sizes[i])) | ||||
| 		} | ||||
| 
 | ||||
| 		if m.Layers[i].Size != sizes[i] { | ||||
| 			errs = append(errs, fmt.Sprintf("mismatched layer[%d] size: Manifest.Layers[%d].Size=%d, len(Compressed())=%d", i, i, m.Layers[i].Size, sizes[i])) | ||||
| 		} | ||||
| 
 | ||||
| 	} | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func validateManifest(img v1.Image) error { | ||||
| 	digest, err := img.Digest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	rm, err := img.RawManifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	hash, _, err := v1.SHA256(bytes.NewReader(rm)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := img.Manifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	pm, err := v1.ParseManifest(bytes.NewReader(rm)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	errs := []string{} | ||||
| 	if digest != hash { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched manifest digest: Digest()=%s, SHA256(RawManifest())=%s", digest, hash)) | ||||
| 	} | ||||
| 
 | ||||
| 	if diff := cmp.Diff(pm, m); diff != "" { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched manifest content: (-ParseManifest(RawManifest()) +Manifest()) %s", diff)) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
							
								
								
									
vendor/github.com/google/go-containerregistry/pkg/v1/validate/index.go (new vendored file, 123 lines)
							|  | @ -0,0 +1,123 @@ | |||
| // Copyright 2018 Google LLC All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //    http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package validate | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/google/go-cmp/cmp" | ||||
| 	v1 "github.com/google/go-containerregistry/pkg/v1" | ||||
| 	"github.com/google/go-containerregistry/pkg/v1/types" | ||||
| ) | ||||
| 
 | ||||
| // Index validates that idx does not violate any invariants of the index format.
 | ||||
| func Index(idx v1.ImageIndex) error { | ||||
| 	errs := []string{} | ||||
| 
 | ||||
| 	if err := validateChildren(idx); err != nil { | ||||
| 		errs = append(errs, fmt.Sprintf("validating children: %v", err)) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := validateIndexManifest(idx); err != nil { | ||||
| 		errs = append(errs, fmt.Sprintf("validating index manifest: %v", err)) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n\n")) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func validateChildren(idx v1.ImageIndex) error { | ||||
| 	manifest, err := idx.IndexManifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	errs := []string{} | ||||
| 	for i, desc := range manifest.Manifests { | ||||
| 		switch desc.MediaType { | ||||
| 		case types.OCIImageIndex, types.DockerManifestList: | ||||
| 			idx, err := idx.ImageIndex(desc.Digest) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if err := Index(idx); err != nil { | ||||
| 				errs = append(errs, fmt.Sprintf("failed to validate index Manifests[%d](%s): %v", i, desc.Digest, err)) | ||||
| 			} | ||||
| 		case types.OCIManifestSchema1, types.DockerManifestSchema2: | ||||
| 			img, err := idx.Image(desc.Digest) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if err := Image(img); err != nil { | ||||
| 				errs = append(errs, fmt.Sprintf("failed to validate image Manifests[%d](%s): %v", i, desc.Digest, err)) | ||||
| 			} | ||||
| 		default: | ||||
| 			return fmt.Errorf("todo: validate index Blob()") | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func validateIndexManifest(idx v1.ImageIndex) error { | ||||
| 	digest, err := idx.Digest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	rm, err := idx.RawManifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	hash, _, err := v1.SHA256(bytes.NewReader(rm)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	m, err := idx.IndexManifest() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	pm, err := v1.ParseIndexManifest(bytes.NewReader(rm)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	errs := []string{} | ||||
| 	if digest != hash { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched manifest digest: Digest()=%s, SHA256(RawManifest())=%s", digest, hash)) | ||||
| 	} | ||||
| 
 | ||||
| 	if diff := cmp.Diff(pm, m); diff != "" { | ||||
| 		errs = append(errs, fmt.Sprintf("mismatched manifest content: (-ParseIndexManifest(RawManifest()) +Manifest()) %s", diff)) | ||||
| 	} | ||||
| 
 | ||||
| 	if len(errs) != 0 { | ||||
| 		return errors.New(strings.Join(errs, "\n")) | ||||
| 	} | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||