Bump go-containerregistry to pick up ACR fix (#1898)
parent 9ec7ab2d21
commit ff3ee40293
go.mod | 12 ++++++------
@@ -24,7 +24,7 @@ require (
 	github.com/godbus/dbus/v5 v5.0.6 // indirect
 	github.com/golang/mock v1.6.0
 	github.com/google/go-cmp v0.5.7
-	github.com/google/go-containerregistry v0.8.0
+	github.com/google/go-containerregistry v0.8.1-0.20220128225446-c63684ed5f15
 	github.com/google/go-github v17.0.0+incompatible
 	github.com/google/slowjam v1.0.0
 	github.com/karrick/godirwalk v1.16.1
@@ -36,7 +36,7 @@ require (
 	github.com/spf13/afero v1.8.0
 	github.com/spf13/cobra v1.3.0
 	github.com/spf13/pflag v1.0.5
-	golang.org/x/net v0.0.0-20220121175114-2ed6ce1e1725
+	golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 )
@@ -111,7 +111,7 @@ require (
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kevinburke/ssh_config v1.1.0 // indirect
-	github.com/klauspost/compress v1.14.1 // indirect
+	github.com/klauspost/compress v1.14.2 // indirect
 	github.com/mattn/go-ieproxy v0.0.2 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -122,7 +122,7 @@ require (
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.2 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
 	github.com/opencontainers/runc v1.1.0 // indirect
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
 	github.com/opencontainers/selinux v1.10.0 // indirect
@@ -143,8 +143,8 @@ require (
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/api v0.65.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5 // indirect
-	google.golang.org/grpc v1.43.0 // indirect
+	google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350 // indirect
+	google.golang.org/grpc v1.44.0 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 )
@@ -68,7 +68,7 @@ import (
 
 func main() {
 	// ...
-	ecrHelper := ecr.ECRHelper{ClientFactory: api.DefaultClientFactory()}
+	ecrHelper := ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}
 	img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(ecrHelper)))
 	if err != nil {
 		panic(err)
@@ -101,10 +101,8 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
 		}
 	} else {
 		f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"))
-		if os.IsNotExist(err) {
+		if err != nil {
 			return Anonymous, nil
-		} else if err != nil {
-			return nil, err
 		}
 		defer f.Close()
 		cf, err = config.LoadFromReader(f)
@@ -156,9 +154,14 @@ func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} }
 type wrapper struct{ h Helper }
 
 func (w wrapper) Resolve(r Resource) (Authenticator, error) {
-	u, p, err := w.h.Get(r.String())
+	u, p, err := w.h.Get(r.RegistryStr())
 	if err != nil {
 		return Anonymous, nil
 	}
+	// If the secret being stored is an identity token, the Username should be set to <token>
+	// ref: https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol
+	if u == "<token>" {
+		return FromConfig(AuthConfig{Username: u, IdentityToken: p}), nil
+	}
 	return FromConfig(AuthConfig{Username: u, Password: p}), nil
 }
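This identity-token branch is the ACR fix named in the commit title: Azure Container Registry credential helpers return the literal username `<token>` plus a refresh token, which must be sent as an identity token rather than a basic-auth password. A minimal sketch of exercising the new NewKeychainFromHelper entry point; the acrHelper type and registry name below are hypothetical, and authn.Helper is assumed to be the one-method Get(serverURL) (username, secret, error) interface this diff wraps:

    package main

    import (
        "fmt"

        "github.com/google/go-containerregistry/pkg/authn"
        "github.com/google/go-containerregistry/pkg/name"
    )

    // acrHelper is a hypothetical credential helper that mimics
    // docker-credential-acr: username "<token>" plus a refresh token.
    type acrHelper struct{}

    func (acrHelper) Get(serverURL string) (string, string, error) {
        return "<token>", "example-refresh-token", nil
    }

    func main() {
        ref, err := name.ParseReference("example.azurecr.io/repo/image:tag")
        if err != nil {
            panic(err)
        }
        kc := authn.NewKeychainFromHelper(acrHelper{})
        auth, err := kc.Resolve(ref.Context())
        if err != nil {
            panic(err)
        }
        cfg, err := auth.Authorization()
        if err != nil {
            panic(err)
        }
        // With the fix, the secret lands in IdentityToken, not Password.
        fmt.Println(cfg.Username, cfg.IdentityToken != "")
    }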
@@ -31,6 +31,7 @@ type image struct {
 	ref          name.Reference
 	opener       *imageOpener
 	tarballImage v1.Image
+	id           *v1.Hash
 
 	once sync.Once
 	err  error
@@ -95,10 +96,20 @@ func Image(ref name.Reference, options ...Option) (v1.Image, error) {
 		ctx:    o.ctx,
 	}
 
-	return &image{
+	img := &image{
 		ref:    ref,
 		opener: i,
-	}, nil
+	}
+
+	// Eagerly fetch Image ID to ensure it actually exists.
+	// https://github.com/google/go-containerregistry/issues/1186
+	id, err := img.ConfigName()
+	if err != nil {
+		return nil, err
+	}
+	img.id = &id
+
+	return img, nil
 }
 
 func (i *image) initialize() error {
@@ -133,6 +144,9 @@ func (i *image) Size() (int64, error) {
 }
 
 func (i *image) ConfigName() (v1.Hash, error) {
+	if i.id != nil {
+		return *i.id, nil
+	}
 	res, _, err := i.opener.client.ImageInspectWithRaw(i.opener.ctx, i.ref.String())
 	if err != nil {
 		return v1.Hash{}, err
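Because Image now resolves ConfigName eagerly, a reference the daemon does not know about fails at construction instead of on first use. A hedged usage sketch, assuming a reachable Docker daemon; the image name and handling are illustrative:

    package main

    import (
        "fmt"

        "github.com/google/go-containerregistry/pkg/name"
        "github.com/google/go-containerregistry/pkg/v1/daemon"
    )

    func main() {
        ref, err := name.ParseReference("docker.io/library/nosuchimage:latest")
        if err != nil {
            panic(err)
        }
        // daemon.Image now inspects the image up front; if the daemon does
        // not know the reference, the error surfaces here rather than later.
        img, err := daemon.Image(ref)
        if err != nil {
            fmt.Println("image not available locally:", err)
            return
        }
        id, _ := img.ConfigName() // served from the cached ID, no extra round trip
        fmt.Println(id)
    }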
@@ -17,15 +17,19 @@ package layout
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
+	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 
+	"github.com/google/go-containerregistry/pkg/logs"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/match"
 	"github.com/google/go-containerregistry/pkg/v1/mutate"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
 	"github.com/google/go-containerregistry/pkg/v1/stream"
 	"github.com/google/go-containerregistry/pkg/v1/types"
+	"golang.org/x/sync/errgroup"
 )
@@ -221,34 +225,104 @@ func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error {
 // WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at
 // blobs/{hash.Algorithm}/{hash.Hex}.
 func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error {
+	return l.writeBlob(hash, -1, r, nil)
+}
+
+func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func() (v1.Hash, error)) error {
+	if hash.Hex == "" && renamer == nil {
+		panic("writeBlob called an invalid hash and no renamer")
+	}
+
 	dir := l.path("blobs", hash.Algorithm)
 	if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
 		return err
 	}
 
+	// Check if blob already exists and is the correct size
 	file := filepath.Join(dir, hash.Hex)
-	if _, err := os.Stat(file); err == nil {
-		// Blob already exists, that's fine.
+	if s, err := os.Stat(file); err == nil && !s.IsDir() && (s.Size() == size || size == -1) {
 		return nil
 	}
-	w, err := os.Create(file)
+
+	// If a renamer func was provided write to a temporary file
+	open := func() (*os.File, error) { return os.Create(file) }
+	if renamer != nil {
+		open = func() (*os.File, error) { return ioutil.TempFile(dir, hash.Hex) }
+	}
+	w, err := open()
 	if err != nil {
 		return err
 	}
+	if renamer != nil {
+		// Delete temp file if an error is encountered before renaming
+		defer func() {
+			if err := os.Remove(w.Name()); err != nil && !errors.Is(err, os.ErrNotExist) {
+				logs.Warn.Printf("error removing temporary file after encountering an error while writing blob: %v", err)
+			}
+		}()
+	}
 	defer w.Close()
 
-	_, err = io.Copy(w, r)
-	return err
+	// Write to file and exit if not renaming
+	if n, err := io.Copy(w, rc); err != nil || renamer == nil {
+		return err
+	} else if size != -1 && n != size {
+		return fmt.Errorf("expected blob size %d, but only wrote %d", size, n)
+	}
+
+	// Always close reader before renaming, since Close computes the digest in
+	// the case of streaming layers. If Close is not called explicitly, it will
+	// occur in a goroutine that is not guaranteed to succeed before renamer is
+	// called. When renamer is the layer's Digest method, it can return
+	// ErrNotComputed.
+	if err := rc.Close(); err != nil {
+		return err
+	}
+
+	// Always close file before renaming
+	if err := w.Close(); err != nil {
+		return err
+	}
+
+	// Rename file based on the final hash
+	finalHash, err := renamer()
+	if err != nil {
+		return fmt.Errorf("error getting final digest of layer: %w", err)
+	}
+
+	renamePath := l.path("blobs", finalHash.Algorithm, finalHash.Hex)
+	return os.Rename(w.Name(), renamePath)
 }
 
 // TODO: A streaming version of WriteBlob so we don't have to know the hash
 // before we write it.
 
-// TODO: For streaming layers we should write to a tmp file then Rename to the
-// final digest.
+// writeLayer writes the compressed layer to a blob. Unlike WriteBlob it will
+// write to a temporary file (suffixed with .tmp) within the layout until the
+// compressed reader is fully consumed and written to disk. Also unlike
+// WriteBlob, it will not skip writing and exit without error when a blob file
+// exists, but does not have the correct size. (The blob hash is not
+// considered, because it may be expensive to compute.)
 func (l Path) writeLayer(layer v1.Layer) error {
 	d, err := layer.Digest()
-	if err != nil {
+	if errors.Is(err, stream.ErrNotComputed) {
+		// Allow digest errors, since streams may not have calculated the hash
+		// yet. Instead, use an empty value, which will be transformed into a
+		// random file name with `ioutil.TempFile` and the final digest will be
+		// calculated after writing to a temp file and before renaming to the
+		// final path.
+		d = v1.Hash{Algorithm: "sha256", Hex: ""}
+	} else if err != nil {
 		return err
 	}
+
+	s, err := layer.Size()
+	if errors.Is(err, stream.ErrNotComputed) {
+		// Allow size errors, since streams may not have calculated the size
+		// yet. Instead, use zero as a sentinel value meaning that no size
+		// comparison can be done and any sized blob file should be considered
+		// valid and not overwritten.
+		//
+		// TODO: Provide an option to always overwrite blobs.
+		s = -1
+	} else if err != nil {
+		return err
+	}
+
@@ -257,7 +331,10 @@ func (l Path) writeLayer(layer v1.Layer) error {
 		return err
 	}
 
-	return l.WriteBlob(d, r)
+	if err := l.writeBlob(d, s, r, layer.Digest); err != nil {
+		return fmt.Errorf("error writing layer: %w", err)
+	}
+	return nil
 }
 
 // RemoveBlob removes a file from the blobs directory in the Path
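The writeBlob rework above is the classic write-to-temp-then-rename scheme: bytes whose digest is not yet known are streamed to a temporary file and renamed to their content address once the digest is computed, so a crash never leaves a half-written blob under a valid name. The same idiom in a self-contained sketch (paths and names are illustrative):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "os"
        "path/filepath"
        "strings"
    )

    // writeContentAddressed streams r to a temp file while hashing it, then
    // renames the temp file to blobs/sha256/<hex> once the digest is known.
    func writeContentAddressed(dir string, r io.Reader) (string, error) {
        if err := os.MkdirAll(filepath.Join(dir, "sha256"), 0o755); err != nil {
            return "", err
        }
        tmp, err := os.CreateTemp(dir, "blob-*")
        if err != nil {
            return "", err
        }
        defer os.Remove(tmp.Name()) // cleans up on error; no-op after rename

        h := sha256.New()
        if _, err := io.Copy(io.MultiWriter(tmp, h), r); err != nil {
            tmp.Close()
            return "", err
        }
        if err := tmp.Close(); err != nil { // always close before renaming
            return "", err
        }
        final := filepath.Join(dir, "sha256", hex.EncodeToString(h.Sum(nil)))
        return final, os.Rename(tmp.Name(), final)
    }

    func main() {
        path, err := writeContentAddressed(os.TempDir(), strings.NewReader("hello"))
        if err != nil {
            panic(err)
        }
        fmt.Println("blob written to", path)
    }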
@@ -18,7 +18,6 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
-	"strings"
 
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
@@ -141,14 +140,8 @@ func (i *image) compute() error {
 		manifest.Config.MediaType = *i.configMediaType
 	}
 
-	// With OCI media types, this should not be set, see discussion:
-	// https://github.com/opencontainers/image-spec/pull/795
 	if i.mediaType != nil {
-		if strings.Contains(string(*i.mediaType), types.OCIVendorPrefix) {
-			manifest.MediaType = ""
-		} else if strings.Contains(string(*i.mediaType), types.DockerVendorPrefix) {
-			manifest.MediaType = *i.mediaType
-		}
+		manifest.MediaType = *i.mediaType
 	}
 
 	if i.annotations != nil {
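After this change a media type set via the mutate package is serialized verbatim instead of being blanked for OCI types. A small sketch against the public API, assuming the bumped go-containerregistry version:

    package main

    import (
        "fmt"

        "github.com/google/go-containerregistry/pkg/v1/empty"
        "github.com/google/go-containerregistry/pkg/v1/mutate"
        "github.com/google/go-containerregistry/pkg/v1/types"
    )

    func main() {
        // Force an OCI manifest media type; previously this was dropped from
        // the serialized manifest, now it is emitted as-is.
        img := mutate.MediaType(empty.Image, types.OCIManifestSchema1)
        mt, err := img.MediaType()
        if err != nil {
            panic(err)
        }
        fmt.Println(mt) // application/vnd.oci.image.manifest.v1+json
    }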
@@ -17,7 +17,6 @@ package mutate
 import (
 	"encoding/json"
 	"fmt"
-	"strings"
 
 	"github.com/google/go-containerregistry/pkg/logs"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
@@ -131,14 +130,8 @@ func (i *index) compute() error {
 
 	manifest.Manifests = manifests
 
-	// With OCI media types, this should not be set, see discussion:
-	// https://github.com/opencontainers/image-spec/pull/795
 	if i.mediaType != nil {
-		if strings.Contains(string(*i.mediaType), types.OCIVendorPrefix) {
-			manifest.MediaType = ""
-		} else if strings.Contains(string(*i.mediaType), types.DockerVendorPrefix) {
-			manifest.MediaType = *i.mediaType
-		}
+		manifest.MediaType = *i.mediaType
 	}
 
 	if i.annotations != nil {
@@ -15,7 +15,9 @@
 package v1
 
 import (
+	"fmt"
+	"sort"
 	"strings"
 )
 
 // Platform represents the target os/arch for an image.
@@ -28,11 +30,59 @@ type Platform struct {
 	Features []string `json:"features,omitempty"`
 }
 
+func (p Platform) String() string {
+	if p.OS == "" {
+		return ""
+	}
+	var b strings.Builder
+	b.WriteString(p.OS)
+	if p.Architecture != "" {
+		b.WriteString("/")
+		b.WriteString(p.Architecture)
+	}
+	if p.Variant != "" {
+		b.WriteString("/")
+		b.WriteString(p.Variant)
+	}
+	if p.OSVersion != "" {
+		b.WriteString(":")
+		b.WriteString(p.OSVersion)
+	}
+	return b.String()
+}
+
+// ParsePlatform parses a string representing a Platform, if possible.
+func ParsePlatform(s string) (*Platform, error) {
+	var p Platform
+	parts := strings.Split(strings.TrimSpace(s), ":")
+	if len(parts) == 2 {
+		p.OSVersion = parts[1]
+	}
+	parts = strings.Split(parts[0], "/")
+	if len(parts) > 0 {
+		p.OS = parts[0]
+	}
+	if len(parts) > 1 {
+		p.Architecture = parts[1]
+	}
+	if len(parts) > 2 {
+		p.Variant = parts[2]
+	}
+	if len(parts) > 3 {
+		return nil, fmt.Errorf("too many slashes in platform spec: %s", s)
+	}
+	return &p, nil
+}
+
 // Equals returns true if the given platform is semantically equivalent to this one.
 // The order of Features and OSFeatures is not important.
 func (p Platform) Equals(o Platform) bool {
-	return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion &&
-		stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features)
+	return p.OS == o.OS &&
+		p.Architecture == o.Architecture &&
+		p.Variant == o.Variant &&
+		p.OSVersion == o.OSVersion &&
+		stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) &&
+		stringSliceEqualIgnoreOrder(p.Features, o.Features)
 }
 
 // stringSliceEqual compares 2 string slices and returns if their contents are identical.
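A quick sketch of the new Platform helpers; the expected output in the comments follows from the String and ParsePlatform implementations above:

    package main

    import (
        "fmt"

        v1 "github.com/google/go-containerregistry/pkg/v1"
    )

    func main() {
        p, err := v1.ParsePlatform("windows/amd64:10.0.14393.1066")
        if err != nil {
            panic(err)
        }
        fmt.Println(p.OS, p.Architecture, p.OSVersion) // windows amd64 10.0.14393.1066
        fmt.Println(p.String())                        // windows/amd64:10.0.14393.1066

        q, _ := v1.ParsePlatform("linux/arm64/v8")
        fmt.Println(q.Variant) // v8
    }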
@@ -59,7 +59,7 @@ type Backoff = retry.Backoff
 var defaultRetryPredicate retry.Predicate = func(err error) bool {
 	// Various failure modes here, as we're often reading from and writing to
 	// the network.
-	if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.EPIPE) {
+	if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) {
 		logs.Warn.Printf("retrying %v", err)
 		return true
 	}
@@ -79,7 +79,7 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes
 		schemes = append(schemes, "http")
 	}
 
-	var errs []string
+	var errs []error
 	for _, scheme := range schemes {
 		url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
 		req, err := http.NewRequest(http.MethodGet, url, nil)
@@ -88,7 +88,7 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes
 	}
 	resp, err := client.Do(req.WithContext(ctx))
 	if err != nil {
-		errs = append(errs, err.Error())
+		errs = append(errs, err)
 		// Potentially retry with http.
 		continue
 	}
@@ -125,7 +125,7 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes
 			return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
 		}
 	}
-	return nil, errors.New(strings.Join(errs, "; "))
+	return nil, multierrs(errs)
 }
 
 func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge {
@@ -145,3 +145,36 @@ func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchalle
 
 	return challenges[0]
 }
+
+type multierrs []error
+
+func (m multierrs) Error() string {
+	var b strings.Builder
+	hasWritten := false
+	for _, err := range m {
+		if hasWritten {
+			b.WriteString("; ")
+		}
+		hasWritten = true
+		b.WriteString(err.Error())
+	}
+	return b.String()
+}
+
+func (m multierrs) As(target interface{}) bool {
+	for _, err := range m {
+		if errors.As(err, target) {
+			return true
+		}
+	}
+	return false
+}
+
+func (m multierrs) Is(target error) bool {
+	for _, err := range m {
+		if errors.Is(err, target) {
+			return true
+		}
+	}
+	return false
+}
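Collecting []error instead of []string keeps the joined message, but because multierrs implements Is and As, callers can now match any of the wrapped failures with the standard errors package. A standalone sketch of the same pattern (multierrs itself is unexported):

    package main

    import (
        "errors"
        "fmt"
        "strings"
        "syscall"
    )

    // multi mirrors the unexported multierrs type above.
    type multi []error

    func (m multi) Error() string {
        parts := make([]string, 0, len(m))
        for _, err := range m {
            parts = append(parts, err.Error())
        }
        return strings.Join(parts, "; ")
    }

    func (m multi) Is(target error) bool {
        for _, err := range m {
            if errors.Is(err, target) {
                return true
            }
        }
        return false
    }

    func main() {
        err := multi{
            fmt.Errorf("dial https: %w", syscall.ECONNREFUSED),
            errors.New("dial http: timeout"),
        }
        fmt.Println(err)                                  // both messages, "; "-joined
        fmt.Println(errors.Is(err, syscall.ECONNREFUSED)) // true
    }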
@@ -126,18 +126,36 @@ func (l *Layer) Compressed() (io.ReadCloser, error) {
 	return newCompressedReader(l)
 }
 
+// finalize sets the layer to consumed and computes all hash and size values.
+func (l *Layer) finalize(uncompressed, compressed hash.Hash, size int64) error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(uncompressed.Sum(nil)))
+	if err != nil {
+		return err
+	}
+	l.diffID = &diffID
+
+	digest, err := v1.NewHash("sha256:" + hex.EncodeToString(compressed.Sum(nil)))
+	if err != nil {
+		return err
+	}
+	l.digest = &digest
+
+	l.size = size
+	l.consumed = true
+	return nil
+}
+
 type compressedReader struct {
-	closer io.Closer // original blob's Closer.
-
-	h, zh hash.Hash // collects digests of compressed and uncompressed stream.
-	pr    io.Reader
-	bw    *bufio.Writer
-	count *countWriter
-
-	l *Layer // stream.Layer to update upon Close.
+	pr     io.Reader
+	closer func() error
 }
 
 func newCompressedReader(l *Layer) (*compressedReader, error) {
 	// Collect digests of compressed and uncompressed stream and size of
 	// compressed stream.
 	h := sha256.New()
 	zh := sha256.New()
 	count := &countWriter{}
@@ -158,24 +176,74 @@ func newCompressedReader(l *Layer) (*compressedReader, error) {
 		return nil, err
 	}
 
+	doneDigesting := make(chan struct{})
+
 	cr := &compressedReader{
-		closer: newMultiCloser(zw, l.blob),
-		pr:     pr,
-		bw:     bw,
-		h:      h,
-		zh:     zh,
-		count:  count,
-		l:      l,
+		pr: pr,
+		closer: func() error {
+			// Immediately close pw without error. There are three ways to get
+			// here.
+			//
+			// 1. There was a copy error due from the underlying reader, in which
+			//    case the error will not be overwritten.
+			// 2. Copying from the underlying reader completed successfully.
+			// 3. Close has been called before the underlying reader has been
+			//    fully consumed. In this case pw must be closed in order to
+			//    keep the flush of bw from blocking indefinitely.
+			//
+			// NOTE: pw.Close never returns an error. The signature is only to
+			// implement io.Closer.
+			_ = pw.Close()
+
+			// Close the inner ReadCloser.
+			//
+			// NOTE: net/http will call close on success, so if we've already
+			// closed the inner rc, it's not an error.
+			if err := l.blob.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+				return err
+			}
+
+			// Finalize layer with its digest and size values.
+			<-doneDigesting
+			return l.finalize(h, zh, count.n)
+		},
 	}
 	go func() {
-		if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil {
+		// Copy blob into the gzip writer, which also hashes and counts the
+		// size of the compressed output, and hasher of the raw contents.
+		_, copyErr := io.Copy(io.MultiWriter(h, zw), l.blob)
+
+		// Close the gzip writer once copying is done. If this is done in the
+		// Close method of compressedReader instead, then it can cause a panic
+		// when the compressedReader is closed before the blob is fully
+		// consumed and io.Copy in this goroutine is still blocking.
+		closeErr := zw.Close()
+
+		// Check errors from writing and closing streams.
+		if copyErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(copyErr)
+			return
+		}
+		if closeErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(closeErr)
+			return
+		}
+
+		// Flush the buffer once all writes are complete to the gzip writer.
+		if err := bw.Flush(); err != nil {
+			close(doneDigesting)
 			pw.CloseWithError(err)
 			return
 		}
-		// Now close the compressed reader, to flush the gzip stream
-		// and calculate digest/diffID/size. This will cause pr to
-		// return EOF which will cause readers of the Compressed stream
-		// to finish reading.
+
+		// Notify closer that digests are done being written.
+		close(doneDigesting)
+
+		// Close the compressed reader to calculate digest/diffID/size. This
+		// will cause pr to return EOF which will cause readers of the
+		// Compressed stream to finish reading.
 		pw.CloseWithError(cr.Close())
 	}()
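All of this machinery computes a streaming layer's digest, diffID, and compressed size in a single pass while a consumer reads the compressed bytes. The underlying one-pass idiom, reduced to a self-contained sketch:

    package main

    import (
        "compress/gzip"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        src := strings.NewReader("layer contents")

        pr, pw := io.Pipe()
        h := sha256.New()  // digest of the uncompressed stream (diffID)
        zh := sha256.New() // digest of the compressed stream

        go func() {
            zw := gzip.NewWriter(io.MultiWriter(pw, zh))
            // Tee the raw bytes into h while compressing into zw.
            if _, err := io.Copy(io.MultiWriter(h, zw), src); err != nil {
                pw.CloseWithError(err)
                return
            }
            if err := zw.Close(); err != nil { // flush gzip trailer before EOF
                pw.CloseWithError(err)
                return
            }
            pw.Close()
        }()

        n, err := io.Copy(io.Discard, pr) // a consumer reading the compressed stream
        if err != nil {
            panic(err)
        }
        fmt.Println("compressed size:", n)
        fmt.Println("diffID: sha256:" + hex.EncodeToString(h.Sum(nil)))
        fmt.Println("digest: sha256:" + hex.EncodeToString(zh.Sum(nil)))
    }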
@@ -184,36 +252,7 @@ func newCompressedReader(l *Layer) (*compressedReader, error) {
 
 func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
 
-func (cr *compressedReader) Close() error {
-	cr.l.mu.Lock()
-	defer cr.l.mu.Unlock()
-
-	// Close the inner ReadCloser.
-	if err := cr.closer.Close(); err != nil {
-		return err
-	}
-
-	// Flush the buffer.
-	if err := cr.bw.Flush(); err != nil {
-		return err
-	}
-
-	diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
-	if err != nil {
-		return err
-	}
-	cr.l.diffID = &diffID
-
-	digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil)))
-	if err != nil {
-		return err
-	}
-	cr.l.digest = &digest
-
-	cr.l.size = cr.count.n
-	cr.l.consumed = true
-	return nil
-}
+func (cr *compressedReader) Close() error { return cr.closer() }
 
 // countWriter counts bytes written to it.
 type countWriter struct{ n int64 }
@@ -222,21 +261,3 @@ func (c *countWriter) Write(p []byte) (int, error) {
 	c.n += int64(len(p))
 	return len(p), nil
 }
-
-// multiCloser is a Closer that collects multiple Closers and Closes them in order.
-type multiCloser []io.Closer
-
-var _ io.Closer = (multiCloser)(nil)
-
-func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
-
-func (m multiCloser) Close() error {
-	for _, c := range m {
-		// NOTE: net/http will call close on success, so if we've already
-		// closed the inner rc, it's not an error.
-		if err := c.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
-			return err
-		}
-	}
-	return nil
-}
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io"
 	"strings"
 
 	"github.com/google/go-cmp/cmp"
@@ -117,8 +118,18 @@ func validateLayers(img v1.Image, opt ...Option) error {
 	diffids := []v1.Hash{}
 	udiffids := []v1.Hash{}
 	sizes := []int64{}
-	for _, layer := range layers {
+	for i, layer := range layers {
 		cl, err := computeLayer(layer)
+		if errors.Is(err, io.ErrUnexpectedEOF) {
+			// Errored while reading tar content of layer because a header or
+			// content section was not the correct length. This is most likely
+			// due to an incomplete download or otherwise interrupted process.
+			m, err := img.Manifest()
+			if err != nil {
+				return fmt.Errorf("undersized layer[%d] content", i)
+			}
+			return fmt.Errorf("undersized layer[%d] content: Manifest.Layers[%d].Size=%d", i, i, m.Layers[i].Size)
+		}
 		if err != nil {
 			return err
 		}
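The io.ErrUnexpectedEOF branch fires when a layer's tar stream is shorter than its headers promise — the signature of a truncated download. A minimal reproduction of that error mode with the standard library:

    package main

    import (
        "archive/tar"
        "bytes"
        "errors"
        "fmt"
        "io"
    )

    func main() {
        // Build a tar whose header promises 1024 bytes but provide only 10.
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        tw.WriteHeader(&tar.Header{Name: "file", Size: 1024})
        tw.Write(bytes.Repeat([]byte("x"), 10))
        // Deliberately skip tw.Close(): the archive is cut short.

        tr := tar.NewReader(bytes.NewReader(buf.Bytes()))
        if _, err := tr.Next(); err != nil {
            panic(err)
        }
        _, err := io.Copy(io.Discard, tr)
        fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true
    }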
@@ -1,3 +1,4 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 // Copyright 2018 Google LLC All Rights Reserved.
@@ -76,12 +76,11 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64
 
-	history     chan *history
-	input       chan struct{}
-	result      chan decodeOutput
-	sequenceBuf []seq
-	err         error
-	decWG       sync.WaitGroup
+	history chan *history
+	input   chan struct{}
+	result  chan decodeOutput
+	err     error
+	decWG   sync.WaitGroup
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
@@ -512,18 +511,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
 		in = in[3:]
 	}
-	// Allocate sequences
-	if cap(b.sequenceBuf) < nSeqs {
-		if b.lowMem {
-			b.sequenceBuf = make([]seq, nSeqs)
-		} else {
-			// Allocate max
-			b.sequenceBuf = make([]seq, nSeqs, maxSequences)
-		}
-	} else {
-		// Reuse buffer
-		b.sequenceBuf = b.sequenceBuf[:nSeqs]
-	}
+
 	var seqs = &sequenceDecs{}
 	if nSeqs > 0 {
 		if len(in) < 1 {
@@ -5,6 +5,7 @@ package zstd
 
 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"io"
 )
@@ -15,18 +16,50 @@ const HeaderMaxSize = 14 + 3
 
 // Header contains information about the first frame and block within that.
 type Header struct {
-	// Window Size the window of data to keep while decoding.
-	// Will only be set if HasFCS is false.
-	WindowSize uint64
+	// SingleSegment specifies whether the data is to be decompressed into a
+	// single contiguous memory segment.
+	// It implies that WindowSize is invalid and that FrameContentSize is valid.
+	SingleSegment bool
 
-	// Frame content size.
-	// Expected size of the entire frame.
-	FrameContentSize uint64
+	// WindowSize is the window of data to keep while decoding.
+	// Will only be set if SingleSegment is false.
+	WindowSize uint64
 
 	// Dictionary ID.
 	// If 0, no dictionary.
 	DictionaryID uint32
 
+	// HasFCS specifies whether FrameContentSize has a valid value.
+	HasFCS bool
+
+	// FrameContentSize is the expected uncompressed size of the entire frame.
+	FrameContentSize uint64
+
+	// Skippable will be true if the frame is meant to be skipped.
+	// This implies that FirstBlock.OK is false.
+	Skippable bool
+
+	// SkippableID is the user-specific ID for the skippable frame.
+	// Valid values are between 0 to 15, inclusive.
+	SkippableID int
+
+	// SkippableSize is the length of the user data to skip following
+	// the header.
+	SkippableSize uint32
+
+	// HeaderSize is the raw size of the frame header.
+	//
+	// For normal frames, it includes the size of the magic number and
+	// the size of the header (per section 3.1.1.1).
+	// It does not include the size for any data blocks (section 3.1.1.2) nor
+	// the size for the trailing content checksum.
+	//
+	// For skippable frames, this counts the size of the magic number
+	// along with the size of the size field of the payload.
+	// It does not include the size of the skippable payload itself.
+	// The total frame size is the HeaderSize plus the SkippableSize.
+	HeaderSize int
+
 	// First block information.
 	FirstBlock struct {
 		// OK will be set if first block could be decoded.
@@ -51,17 +84,9 @@ type Header struct {
 		CompressedSize int
 	}
 
-	// Skippable will be true if the frame is meant to be skipped.
-	// No other information will be populated.
-	Skippable bool
-
 	// If set there is a checksum present for the block content.
 	// The checksum field at the end is always 4 bytes long.
 	HasCheckSum bool
-
-	// If this is true FrameContentSize will have a valid value
-	HasFCS bool
-
-	SingleSegment bool
 }
 
 // Decode the header from the beginning of the stream.
@@ -71,39 +96,46 @@ type Header struct {
 // If there isn't enough input, io.ErrUnexpectedEOF is returned.
 // The FirstBlock.OK will indicate if enough information was available to decode the first block header.
 func (h *Header) Decode(in []byte) error {
+	*h = Header{}
 	if len(in) < 4 {
 		return io.ErrUnexpectedEOF
 	}
+	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
 	if !bytes.Equal(b, frameMagic) {
 		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
 			return ErrMagicMismatch
 		}
-		*h = Header{Skippable: true}
+		if len(in) < 4 {
+			return io.ErrUnexpectedEOF
+		}
+		h.HeaderSize += 4
+		h.Skippable = true
+		h.SkippableID = int(b[0] & 0xf)
+		h.SkippableSize = binary.LittleEndian.Uint32(in)
 		return nil
 	}
-	if len(in) < 1 {
-		return io.ErrUnexpectedEOF
-	}
-
-	// Clear output
-	*h = Header{}
-	fhd, in := in[0], in[1:]
-	h.SingleSegment = fhd&(1<<5) != 0
-	h.HasCheckSum = fhd&(1<<2) != 0
-
-	if fhd&(1<<3) != 0 {
-		return errors.New("reserved bit set on frame header")
-	}
 
 	// Read Window_Descriptor
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
+	if len(in) < 1 {
+		return io.ErrUnexpectedEOF
+	}
+	fhd, in := in[0], in[1:]
+	h.HeaderSize++
+	h.SingleSegment = fhd&(1<<5) != 0
+	h.HasCheckSum = fhd&(1<<2) != 0
+	if fhd&(1<<3) != 0 {
+		return errors.New("reserved bit set on frame header")
+	}
+
 	if !h.SingleSegment {
 		if len(in) < 1 {
 			return io.ErrUnexpectedEOF
 		}
 		var wd byte
 		wd, in = in[0], in[1:]
+		h.HeaderSize++
 		windowLog := 10 + (wd >> 3)
 		windowBase := uint64(1) << windowLog
 		windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -120,9 +152,7 @@ func (h *Header) Decode(in []byte) error {
 		return io.ErrUnexpectedEOF
 	}
 	b, in = in[:size], in[size:]
-	if b == nil {
-		return io.ErrUnexpectedEOF
-	}
+	h.HeaderSize += int(size)
 	switch size {
 	case 1:
 		h.DictionaryID = uint32(b[0])
@@ -152,9 +182,7 @@ func (h *Header) Decode(in []byte) error {
 		return io.ErrUnexpectedEOF
 	}
 	b, in = in[:fcsSize], in[fcsSize:]
-	if b == nil {
-		return io.ErrUnexpectedEOF
-	}
+	h.HeaderSize += int(fcsSize)
 	switch fcsSize {
 	case 1:
 		h.FrameContentSize = uint64(b[0])
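With the reworked Header, Decode now also reports how many bytes of header it consumed and fully describes skippable frames. A quick sketch against klauspost/compress's public API (field semantics as documented in the struct above):

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, err := zstd.NewWriter(nil)
        if err != nil {
            panic(err)
        }
        frame := enc.EncodeAll([]byte("hello zstd"), nil)
        enc.Close()

        var h zstd.Header
        if err := h.Decode(frame); err != nil {
            panic(err)
        }
        fmt.Println("header bytes:", h.HeaderSize)
        fmt.Println("has content size:", h.HasFCS, "size:", h.FrameContentSize)
        fmt.Println("skippable frame:", h.Skippable)
    }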
@@ -24,6 +24,7 @@ type encoderOptions struct {
 	allLitEntropy   bool
 	customWindow    bool
 	customALEntropy bool
+	customBlockSize bool
 	lowMem          bool
 	dict            *dict
 }
@@ -33,7 +34,7 @@ func (o *encoderOptions) setDefault() {
 		concurrent:    runtime.GOMAXPROCS(0),
 		crc:           true,
 		single:        nil,
-		blockSize:     1 << 16,
+		blockSize:     maxCompressedBlockSize,
 		windowSize:    8 << 20,
 		level:         SpeedDefault,
 		allLitEntropy: true,
@ -106,6 +107,7 @@ func WithWindowSize(n int) EOption {
|
|||
o.customWindow = true
|
||||
if o.blockSize > o.windowSize {
|
||||
o.blockSize = o.windowSize
|
||||
o.customBlockSize = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@@ -188,10 +190,9 @@ func EncoderLevelFromZstd(level int) EncoderLevel {
 		return SpeedDefault
 	case level >= 6 && level < 10:
 		return SpeedBetterCompression
-	case level >= 10:
+	default:
 		return SpeedBestCompression
 	}
-	return SpeedDefault
 }
 
 // String provides a string representation of the compression level.
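Replacing `case level >= 10` with `default` makes the switch exhaustive, so the previously unreachable trailing return could go. Mapping zstd's numeric levels through the public options, as a sketch:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        for _, lvl := range []int{1, 3, 7, 19} {
            fmt.Println(lvl, "->", zstd.EncoderLevelFromZstd(lvl))
        }

        var buf bytes.Buffer
        w, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(19)))
        if err != nil {
            panic(err)
        }
        w.Write([]byte("compress me"))
        w.Close()
        fmt.Println("compressed", buf.Len(), "bytes")
    }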
@@ -222,6 +223,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 		switch o.level {
 		case SpeedFastest:
 			o.windowSize = 4 << 20
+			if !o.customBlockSize {
+				o.blockSize = 1 << 16
+			}
 		case SpeedDefault:
 			o.windowSize = 8 << 20
 		case SpeedBetterCompression:
@@ -1,6 +1,7 @@
 // +build !appengine
 // +build gc
 // +build !purego
+// +build !noasm
 
 #include "textflag.h"
 
@@ -1,13 +1,13 @@
-// +build gc,!purego
+// +build gc,!purego,!noasm
 
 #include "textflag.h"
 
 // Register allocation.
 #define digest R1
-#define h R2 // Return value.
-#define p R3 // Input pointer.
+#define h      R2 // Return value.
+#define p      R3 // Input pointer.
 #define len R4
-#define nblocks R5 // len / 32.
+#define nblocks R5  // len / 32.
 #define prime1 R7
 #define prime2 R8
 #define prime3 R9
@@ -22,50 +22,48 @@
 #define x3 R22
 #define x4 R23
 
-#define round(acc, x) \
-	MADD prime2, acc, x, acc \
-	ROR $64-31, acc \
-	MUL prime1, acc \
+#define round(acc, x)        \
+	MADD prime2, acc, x, acc \
+	ROR  $64-31, acc         \
+	MUL  prime1, acc         \
 
 // x = round(0, x).
-#define round0(x) \
-	MUL prime2, x \
-	ROR $64-31, x \
-	MUL prime1, x \
+#define round0(x)     \
+	MUL prime2, x     \
+	ROR $64-31, x     \
+	MUL prime1, x     \
 
-#define mergeRound(x) \
-	round0(x) \
-	EOR x, h \
-	MADD h, prime4, prime1, h \
+#define mergeRound(x)         \
+	round0(x)                 \
+	EOR x, h                  \
+	MADD h, prime4, prime1, h \
 
 // Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
-	LSR $5, len, nblocks \
-	PCALIGN $16 \
-	loop: \
-	LDP.P 32(p), (x1, x2) \
-	round(v1, x1) \
-	LDP -16(p), (x3, x4) \
-	round(v2, x2) \
-	SUB $1, nblocks \
-	round(v3, x3) \
-	round(v4, x4) \
-	CBNZ nblocks, loop \
-
+#define blocksLoop()      \
+	LSR $5, len, nblocks  \
+	PCALIGN $16           \
+loop:                     \
+	LDP.P 32(p), (x1, x2) \
+	round(v1, x1)         \
+	LDP -16(p), (x3, x4)  \
+	round(v2, x2)         \
+	SUB $1, nblocks       \
+	round(v3, x3)         \
+	round(v4, x4)         \
+	CBNZ nblocks, loop    \
 
 // The primes are repeated here to ensure that they're stored
 // in a contiguous array, so we can load them with LDP.
 DATA primes<> +0(SB)/8, $11400714785074694791
 DATA primes<> +8(SB)/8, $14029467366897019727
 DATA primes<>+16(SB)/8, $1609587929392839161
 DATA primes<>+24(SB)/8, $9650029242287828579
 DATA primes<>+32(SB)/8, $2870177450012600261
 GLOBL primes<>(SB), NOPTR+RODATA, $40
 
 
 // func Sum64(b []byte) uint64
 TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
 	LDP b_base+0(FP), (p, len)
 
 	LDP primes<> +0(SB), (prime1, prime2)
 	LDP primes<>+16(SB), (prime3, prime4)
|
|||
|
||||
end:
|
||||
EOR h >> 33, h
|
||||
MUL prime2, h
|
||||
MUL prime2, h
|
||||
EOR h >> 29, h
|
||||
MUL prime3, h
|
||||
MUL prime3, h
|
||||
EOR h >> 32, h
|
||||
|
||||
MOVD h, ret+24(FP)
|
||||
RET
|
||||
|
||||
|
||||
// func writeBlocks(d *Digest, b []byte) int
|
||||
//
|
||||
// Assumes len(b) >= 32.
|
||||
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
|
||||
LDP primes<>(SB), (prime1, prime2)
|
||||
LDP primes<>(SB), (prime1, prime2)
|
||||
|
||||
// Load state. Assume v[1-4] are stored contiguously.
|
||||
MOVD d+0(FP), digest
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 0(digest), (v1, v2)
|
||||
LDP 16(digest), (v3, v4)
|
||||
|
||||
LDP b_base+8(FP), (p, len)
|
||||
|
|
@ -181,7 +178,7 @@ TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
|
|||
blocksLoop()
|
||||
|
||||
// Store updated state.
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v1, v2), 0(digest)
|
||||
STP (v3, v4), 16(digest)
|
||||
|
||||
BIC $31, len
|
||||
|
|
|
|||
|
|
@@ -1,8 +1,9 @@
-//go:build (amd64 || arm64) && !appengine && gc && !purego
+//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm
 // +build amd64 arm64
 // +build !appengine
 // +build gc
 // +build !purego
+// +build !noasm
 
 package xxhash
 
@@ -1,5 +1,5 @@
-//go:build (!amd64 && !arm64) || appengine || !gc || purego
-// +build !amd64,!arm64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm
+// +build !amd64,!arm64 appengine !gc purego noasm
 
 package xxhash
 
@@ -53,4 +53,10 @@ const (
 
 	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
 	AnnotationDescription = "org.opencontainers.image.description"
+
+	// AnnotationBaseImageDigest is the annotation key for the digest of the image's base image.
+	AnnotationBaseImageDigest = "org.opencontainers.image.base.digest"
+
+	// AnnotationBaseImageName is the annotation key for the image reference of the image's base image.
+	AnnotationBaseImageName = "org.opencontainers.image.base.name"
 )
@@ -89,9 +89,20 @@ type Image struct {
 	// Architecture is the CPU architecture which the binaries in this image are built to run on.
 	Architecture string `json:"architecture"`
 
+	// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
+	Variant string `json:"variant,omitempty"`
+
 	// OS is the name of the operating system which the image is built to run on.
 	OS string `json:"os"`
 
+	// OSVersion is an optional field specifying the operating system
+	// version, for example on Windows `10.0.14393.1066`.
+	OSVersion string `json:"os.version,omitempty"`
+
+	// OSFeatures is an optional field specifying an array of strings,
+	// each listing a required OS feature (for example on Windows `win32k`).
+	OSFeatures []string `json:"os.features,omitempty"`
+
 	// Config defines the execution parameters which should be used as a base when running a container using the image.
 	Config ImageConfig `json:"config,omitempty"`
 
@@ -21,7 +21,7 @@ import "github.com/opencontainers/image-spec/specs-go"
 type Index struct {
 	specs.Versioned
 
-	// MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
+	// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json`
 	MediaType string `json:"mediaType,omitempty"`
 
 	// Manifests references platform specific manifests.
@@ -20,7 +20,7 @@ import "github.com/opencontainers/image-spec/specs-go"
 type Manifest struct {
 	specs.Versioned
 
-	// MediaType specificies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
+	// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
 	MediaType string `json:"mediaType,omitempty"`
 
 	// Config references a configuration object for a container, by digest.
@@ -34,6 +34,10 @@ const (
 	// referenced by the manifest.
 	MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
 
+	// MediaTypeImageLayerZstd is the media type used for zstd compressed
+	// layers referenced by the manifest.
+	MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd"
+
 	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
 	// the manifest but with distribution restrictions.
 	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
@@ -43,6 +47,11 @@ const (
 	// restrictions.
 	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
 
+	// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
+	// compressed layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
+
 	// MediaTypeImageConfig specifies the media type for the image configuration.
 	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
 )
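The zstd layer media types are new in the vendored image-spec pseudo-version. A hedged sketch of dispatching on them when walking a manifest's layers:

    package main

    import (
        "fmt"

        ispec "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        // A descriptor as it might appear in a manifest's Layers list.
        desc := ispec.Descriptor{MediaType: ispec.MediaTypeImageLayerZstd}

        switch desc.MediaType {
        case ispec.MediaTypeImageLayerGzip:
            fmt.Println("gzip-compressed layer")
        case ispec.MediaTypeImageLayerZstd:
            fmt.Println("zstd-compressed layer") // now representable
        default:
            fmt.Println("other:", desc.MediaType)
        }
    }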
@@ -25,7 +25,7 @@ const (
 	VersionPatch = 2
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = ""
+	VersionDev = "-dev"
 )
 
 // Version is the specification version that the package types support.
(File diff suppressed because it is too large.)
@@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} {
 // bool' is implemented for a value in the attributes, it is called to
 // determine if the value matches the one stored in the other attributes. If
 // Equal is not implemented, standard equality is used to determine if the two
-// values are equal.
+// values are equal. Note that some types (e.g. maps) aren't comparable by
+// default, so they must be wrapped in a struct, or in an alias type, with Equal
+// defined.
 func (a *Attributes) Equal(o *Attributes) bool {
 	if a == nil && o == nil {
 		return true
vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go (generated, vendored) | 2
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.1.0
+// - protoc-gen-go-grpc v1.2.0
 // - protoc v3.14.0
 // source: grpc/lb/v1/load_balancer.proto
 
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
-// - protoc-gen-go-grpc v1.1.0
+// - protoc-gen-go-grpc v1.2.0
 // - protoc v3.14.0
 // source: grpc/gcp/handshaker.proto
 
@@ -18,11 +18,6 @@
 
 // Package insecure provides an implementation of the
 // credentials.TransportCredentials interface which disables transport security.
-//
-// Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
 package insecure
 
 import (
@@ -272,7 +272,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
 	})
 }
 
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting the server happens in background.
 func WithBlock() DialOption {
@@ -304,7 +304,7 @@ func WithReturnConnectionError() DialOption {
 // WithCredentialsBundle or WithPerRPCCredentials) which require transport
 // security is incompatible and will cause grpc.Dial() to fail.
 //
-// Deprecated: use insecure.NewCredentials() instead.
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead.
 // Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
@@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool {
 // later release.
 type DepthLoggerV2 interface {
 	LoggerV2
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
@@ -42,6 +42,7 @@ const (
 	aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
 	rbacSupportEnv            = "GRPC_XDS_EXPERIMENTAL_RBAC"
 	federationEnv             = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
+	rlsInXDSEnv               = "GRPC_EXPERIMENTAL_XDS_RLS_LB"
 
 	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
 )
@@ -85,6 +86,12 @@ var (
 	// XDSFederation indicates whether federation support is enabled.
 	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
 
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS CLuster Specifier is enabled, which can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "true".
+	XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true")
+
 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
 )
@@ -115,12 +115,12 @@ type LoggerV2 interface {
 // Notice: This type is EXPERIMENTAL and may be changed or removed in a
 // later release.
 type DepthLoggerV2 interface {
-	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	InfoDepth(depth int, args ...interface{})
-	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	WarningDepth(depth int, args ...interface{})
-	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	ErrorDepth(depth int, args ...interface{})
-	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
 	FatalDepth(depth int, args ...interface{})
 }
@@ -20,9 +20,12 @@ package grpcutil
 
 import "regexp"
 
-// FullMatchWithRegex returns whether the full string matches the regex provided.
-func FullMatchWithRegex(re *regexp.Regexp, string string) bool {
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
 	re.Longest()
-	rem := re.FindString(string)
-	return len(rem) == len(string)
+	rem := re.FindString(text)
+	return len(rem) == len(text)
 }
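The new empty-text special case matters because FindString returns "" both when nothing matches and when the pattern matches the empty string, so the length comparison alone cannot distinguish the two for empty input. A standalone illustration:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        re := regexp.MustCompile("a*") // matches the empty string
        re.Longest()

        // For empty input, FindString gives "" whether or not the pattern
        // matches, so len("") == len("") would report a match even for
        // patterns that cannot match "".
        fmt.Printf("%q\n", re.FindString("")) // ""
        fmt.Println(re.MatchString(""))       // true: the reliable answer

        no := regexp.MustCompile("a+")  // cannot match the empty string
        fmt.Println(no.MatchString("")) // false
    }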
@@ -76,7 +76,21 @@ SOURCES=(
 # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
 # import path of 'bar' in the generated code when 'foo.proto' is imported in
 # one of the sources.
-OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core
+#
+# Note that the protos listed here are all for testing purposes. All protos to
+# be used externally should have a go_package option (and they don't need to be
+# listed here).
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\
+Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
+Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
+Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
 
 for src in ${SOURCES[@]}; do
   echo "protoc ${src}"
@@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do
     -I${WORKDIR}/grpc-proto \
     -I${WORKDIR}/googleapis \
     -I${WORKDIR}/protobuf/src \
-    -I${WORKDIR}/istio \
     ${src}
 done
 
@@ -96,7 +109,6 @@ for src in ${LEGACY_SOURCES[@]}; do
     -I${WORKDIR}/grpc-proto \
     -I${WORKDIR}/googleapis \
     -I${WORKDIR}/protobuf/src \
-    -I${WORKDIR}/istio \
     ${src}
 done
 
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.43.0"
+const Version = "1.44.1-dev"
@@ -538,7 +538,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.8.0
+# github.com/google/go-containerregistry v0.8.1-0.20220128225446-c63684ed5f15
 ## explicit; go 1.14
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/estargz
@@ -609,7 +609,7 @@ github.com/karrick/godirwalk
 # github.com/kevinburke/ssh_config v1.1.0
 ## explicit
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.14.1
+# github.com/klauspost/compress v1.14.2
 ## explicit; go 1.15
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
@@ -659,8 +659,8 @@ github.com/morikuni/aec
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.0.2
-## explicit
+# github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
+## explicit; go 1.11
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opencontainers/runc v1.1.0 => github.com/opencontainers/runc v1.0.0-rc92
@@ -769,7 +769,7 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/net v0.0.0-20220121175114-2ed6ce1e1725
+# golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/context/ctxhttp
@@ -849,7 +849,7 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220118154757-00ab72f36ad5
+# google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350
 ## explicit; go 1.11
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/iam/v1
@@ -859,7 +859,7 @@ google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/storage/v2
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
-# google.golang.org/grpc v1.43.0
+# google.golang.org/grpc v1.44.0
 ## explicit; go 1.14
 google.golang.org/grpc
 google.golang.org/grpc/attributes