Update go-containerregistry (#599)

* Update go-containerregistry

Update go-containerregistry since it can now handle image names of the
format repo:tag@digest.

Should fix #535.

Thanks @ViceIce for the fix!

* Update go-containerregistry again
priyawadhwa 2019-03-06 10:39:51 -08:00 committed by dlorenc
parent cc5a4d2e45
commit 969321521e
41 changed files with 1240 additions and 449 deletions
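Before the file-by-file diff, a minimal sketch of the kind of reference this update lets the vendored name package handle. The registry, repository, tag and digest below are placeholders, and the snippet assumes the go-containerregistry API at the pinned revision:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// A reference carrying both a tag and a digest, the form that previously
	// failed to parse. All values here are made up for illustration.
	ref := "gcr.io/my-project/my-image:v1.2.3@sha256:" +
		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"

	// With the updated dependency, NewDigest strips a well-formed tag from
	// the base before parsing the repository instead of rejecting it.
	d, err := name.NewDigest(ref, name.WeakValidation)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Context().Name()) // gcr.io/my-project/my-image
	fmt.Println(d.DigestStr())      // sha256:deadbeef...
}
```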

Gopkg.lock (generated)

@ -430,7 +430,7 @@
version = "v0.2.0"
[[projects]]
digest = "1:f1b23f53418c1b035a5965ac2600a28b16c08643683d5213fb581ecf4e79a02a"
digest = "1:a4f41b57b6a09cf498024fd9d2872b99c32bfc1462a8f34ac625e88531d52930"
name = "github.com/google/go-containerregistry"
packages = [
"pkg/authn",
@ -444,12 +444,13 @@
"pkg/v1/random",
"pkg/v1/remote",
"pkg/v1/remote/transport",
"pkg/v1/stream",
"pkg/v1/tarball",
"pkg/v1/types",
"pkg/v1/v1util",
]
pruneopts = "NUT"
revision = "88d8d18eb1bde1fcef23c745205c738074290515"
revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457"
[[projects]]
digest = "1:f4f203acd8b11b8747bdcd91696a01dbc95ccb9e2ca2db6abf81c3a4f5e950ce"
@ -1102,7 +1103,7 @@
version = "kubernetes-1.11.0"
[[projects]]
digest = "1:b960fc62d636ccdc3265dd1e190b7f5e7bf5f8d29bf4f02af7f1352768c58f3f"
digest = "1:2f523dd16b56091fab1f329f772c3540742920e270bf0f9b8451106b7f005a66"
name = "k8s.io/client-go"
packages = [
"discovery",
@ -1154,8 +1155,8 @@
"util/integer",
]
pruneopts = "NUT"
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "kubernetes-1.11.0"
revision = "2cefa64ff137e128daeddbd1775cd775708a05bf"
version = "kubernetes-1.11.3"
[[projects]]
digest = "1:e345c95cf277bb7f650306556904df69e0904395c56959a56002d0140747eda0"


@ -33,11 +33,11 @@ required = [
[[constraint]]
name = "k8s.io/client-go"
version = "kubernetes-1.11.0"
version = "kubernetes-1.11.3"
[[constraint]]
name = "github.com/google/go-containerregistry"
revision = "88d8d18eb1bde1fcef23c745205c738074290515"
revision = "678f6c51f585140f8d0c07f6f7e193f7a4c8e457"
[[override]]
name = "k8s.io/apimachinery"


@ -72,7 +72,7 @@ func (h *helper) Authorization() (string, error) {
var out bytes.Buffer
cmd.Stdout = &out
err := h.r.Run(cmd)
cmdErr := h.r.Run(cmd)
// If we see this specific message, it means the domain wasn't found
// and we should fall back on anonymous auth.
@ -81,16 +81,22 @@ func (h *helper) Authorization() (string, error) {
return Anonymous.Authorization()
}
if err != nil {
return "", err
}
// Any other output should be parsed as JSON and the Username / Secret
// fields used for Basic authentication.
ho := helperOutput{}
if err := json.Unmarshal([]byte(output), &ho); err != nil {
if cmdErr != nil {
// If we failed to parse output, it won't contain Secret, so returning it
// in an error should be fine.
return "", fmt.Errorf("invoking %s: %v; output: %s", helperName, cmdErr, output)
}
return "", err
}
if cmdErr != nil {
return "", fmt.Errorf("invoking %s: %v", helperName, cmdErr)
}
b := Basic{Username: ho.Username, Password: ho.Secret}
return b.Authorization()
}


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// package k8schain exposes an implementation of the authn.Keychain interface
// Package k8schain exposes an implementation of the authn.Keychain interface
// based on the semantics the Kubelet follows when pulling the images for a
// Pod in Kubernetes.
package k8schain


@ -17,7 +17,7 @@ package k8schain
import (
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"


@ -73,14 +73,14 @@ func NewDigest(name string, strict Strictness) (Digest, error) {
base := parts[0]
digest := parts[1]
// We don't require a digest, but if we get one check it's valid,
// even when not being strict.
// If we are being strict, we want to validate the digest regardless in case
// it's empty.
if digest != "" || strict == StrictValidation {
// Always check that the digest is valid.
if err := checkDigest(digest); err != nil {
return Digest{}, err
}
tag, err := NewTag(base, strict)
if err == nil {
base = tag.Repository.Name()
}
repo, err := NewRepository(base, strict)


@ -15,12 +15,14 @@
package name
import (
"net"
"net/url"
"regexp"
"strings"
)
const (
// DefaultRegistry is Docker Hub, assumed when a hostname is omitted.
DefaultRegistry = "index.docker.io"
defaultRegistryAlias = "docker.io"
)
@ -63,11 +65,29 @@ func (r Registry) Scope(string) string {
return "registry:catalog:*"
}
func (r Registry) isRFC1918() bool {
ipStr := strings.Split(r.Name(), ":")[0]
ip := net.ParseIP(ipStr)
if ip == nil {
return false
}
for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
_, block, _ := net.ParseCIDR(cidr)
if block.Contains(ip) {
return true
}
}
return false
}
// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined.
func (r Registry) Scheme() string {
if r.insecure {
return "http"
}
if r.isRFC1918() {
return "http"
}
if strings.HasPrefix(r.Name(), "localhost:") {
return "http"
}


@ -21,27 +21,28 @@ import (
)
// ConfigFile is the configuration file that holds the metadata describing
// how to launch a container. The names of the fields are chosen to reflect
// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY
// how to launch a container. See:
// https://github.com/opencontainers/image-spec/blob/master/config.md
type ConfigFile struct {
Architecture string `json:"architecture"`
Container string `json:"container"`
Created Time `json:"created"`
DockerVersion string `json:"docker_version"`
History []History `json:"history"`
Author string `json:"author,omitempty"`
Container string `json:"container,omitempty"`
Created Time `json:"created,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
History []History `json:"history,omitempty"`
OS string `json:"os"`
RootFS RootFS `json:"rootfs"`
Config Config `json:"config"`
ContainerConfig Config `json:"container_config"`
OSVersion string `json:"osversion"`
ContainerConfig Config `json:"container_config,omitempty"`
OSVersion string `json:"osversion,omitempty"`
}
// History is one entry of a list recording how this container image was built.
type History struct {
Author string `json:"author"`
Created Time `json:"created"`
CreatedBy string `json:"created_by"`
Comment string `json:"comment"`
Author string `json:"author,omitempty"`
Created Time `json:"created,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}


@ -20,11 +20,10 @@ import (
"io"
"io/ioutil"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/docker/docker/client"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/tarball"
)
// image accesses an image from a docker daemon
@ -42,6 +41,7 @@ type imageOpener struct {
buffered bool
}
// ImageOption is a functional option for Image.
type ImageOption func(*imageOpener) error
func (i *imageOpener) Open() (v1.Image, error) {
@ -66,7 +66,7 @@ func (i *imageOpener) Open() (v1.Image, error) {
return img, nil
}
// API interface for testing.
// ImageSaver is an interface for testing.
type ImageSaver interface {
ImageSave(context.Context, []string) (io.ReadCloser, error)
}


@ -14,12 +14,14 @@
package daemon
// WithBufferedOpener buffers the image.
func WithBufferedOpener() ImageOption {
return func(i *imageOpener) error {
return i.setBuffered(true)
}
}
// WithUnbufferedOpener streams the image to avoid buffering.
func WithUnbufferedOpener() ImageOption {
return func(i *imageOpener) error {
return i.setBuffered(false)


@ -19,22 +19,21 @@ import (
"io"
"io/ioutil"
"github.com/pkg/errors"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
)
// API interface for testing.
// ImageLoader is an interface for testing.
type ImageLoader interface {
ImageLoad(context.Context, io.Reader, bool) (types.ImageLoadResponse, error)
ImageTag(context.Context, string, string) error
}
// This is a variable so we can override in tests.
// GetImageLoader is a variable so we can override in tests.
var GetImageLoader = func() (ImageLoader, error) {
cli, err := client.NewEnvClient()
if err != nil {
@ -44,6 +43,16 @@ var GetImageLoader = func() (ImageLoader, error) {
return cli, nil
}
// Tag adds a tag to an already existent image.
func Tag(src, dest name.Tag) error {
cli, err := GetImageLoader()
if err != nil {
return err
}
return cli.ImageTag(context.Background(), src.String(), dest.String())
}
// Write saves the image into the daemon as the given tag.
func Write(tag name.Tag, img v1.Image) (string, error) {
cli, err := GetImageLoader()


@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package v1 defines structured types for OCI v1 images
//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
// +k8s:deepcopy-gen=package
//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
// Package v1 defines structured types for OCI v1 images
package v1


@ -0,0 +1,59 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package empty
import (
"encoding/json"
"errors"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
)
// Index is a singleton empty index, think: FROM scratch.
var Index = emptyIndex{}
type emptyIndex struct{}
func (i emptyIndex) MediaType() (types.MediaType, error) {
return types.OCIImageIndex, nil
}
func (i emptyIndex) Digest() (v1.Hash, error) {
return partial.Digest(i)
}
func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) {
return &v1.IndexManifest{
SchemaVersion: 2,
}, nil
}
func (i emptyIndex) RawManifest() ([]byte, error) {
im, err := i.IndexManifest()
if err != nil {
return nil, err
}
return json.Marshal(im)
}
func (i emptyIndex) Image(v1.Hash) (v1.Image, error) {
return nil, errors.New("empty index")
}
func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) {
return nil, errors.New("empty index")
}
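The new empty.Index mirrors the existing empty.Image. A hedged sketch of what it provides, assuming the package as added here:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
)

func main() {
	// An empty but well-formed OCI image index, useful as a base or in tests.
	mt, err := empty.Index.MediaType()
	if err != nil {
		panic(err)
	}
	d, err := empty.Index.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println(mt, d) // application/vnd.oci.image.index.v1+json sha256:...
}
```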


@ -49,7 +49,7 @@ func NewHash(s string) (Hash, error) {
}
// MarshalJSON implements json.Marshaler
func (h *Hash) MarshalJSON() ([]byte, error) {
func (h Hash) MarshalJSON() ([]byte, error) {
return json.Marshal(h.String())
}
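The switch to a value receiver matters when a Hash is held by value. A hedged sketch; the digest literal is just a placeholder of the right length:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	h, err := v1.NewHash("sha256:" +
		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	if err != nil {
		panic(err)
	}
	// With a value receiver, a Hash field marshals as its string form even
	// when the struct stores it by value rather than as *Hash.
	b, err := json.Marshal(struct {
		Digest v1.Hash `json:"digest"`
	}{h})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"digest":"sha256:deadbeef..."}
}
```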


@ -24,9 +24,6 @@ type Image interface {
// The order of the list is oldest/base layer first, and most-recent/top layer last.
Layers() ([]Layer, error)
// BlobSet returns an unordered collection of all the blobs in the image.
BlobSet() (map[Hash]struct{}, error)
// MediaType of this image's manifest.
MediaType() (types.MediaType, error)


@ -18,6 +18,7 @@ import (
"github.com/google/go-containerregistry/pkg/v1/types"
)
// ImageIndex defines the interface for interacting with an OCI image index.
type ImageIndex interface {
// MediaType of this image's manifest.
MediaType() (types.MediaType, error)
@ -28,6 +29,12 @@ type ImageIndex interface {
// IndexManifest returns this image index's manifest object.
IndexManifest() (*IndexManifest, error)
// RawIndexManifest returns the serialized bytes of IndexManifest().
RawIndexManifest() ([]byte, error)
// RawManifest returns the serialized bytes of IndexManifest().
RawManifest() ([]byte, error)
// Image returns a v1.Image that this ImageIndex references.
Image(Hash) (Image, error)
// ImageIndex returns a v1.ImageIndex that this ImageIndex references.
ImageIndex(Hash) (ImageIndex, error)
}


@ -26,9 +26,10 @@ import (
"strings"
"time"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
@ -58,77 +59,14 @@ func Append(base v1.Image, adds ...Addendum) (v1.Image, error) {
if len(adds) == 0 {
return base, nil
}
if err := validate(adds); err != nil {
return nil, err
}
m, err := base.Manifest()
if err != nil {
return nil, err
}
cf, err := base.ConfigFile()
if err != nil {
return nil, err
}
image := &image{
Image: base,
manifest: m.DeepCopy(),
configFile: cf.DeepCopy(),
diffIDMap: make(map[v1.Hash]v1.Layer),
digestMap: make(map[v1.Hash]v1.Layer),
}
diffIDs := image.configFile.RootFS.DiffIDs
history := image.configFile.History
for _, add := range adds {
diffID, err := add.Layer.DiffID()
if err != nil {
return nil, err
}
diffIDs = append(diffIDs, diffID)
history = append(history, add.History)
image.diffIDMap[diffID] = add.Layer
}
manifestLayers := image.manifest.Layers
for _, add := range adds {
d := v1.Descriptor{
MediaType: types.DockerLayer,
}
if d.Size, err = add.Layer.Size(); err != nil {
return nil, err
}
if d.Digest, err = add.Layer.Digest(); err != nil {
return nil, err
}
manifestLayers = append(manifestLayers, d)
image.digestMap[d.Digest] = add.Layer
}
image.configFile.RootFS.DiffIDs = diffIDs
image.configFile.History = history
image.manifest.Layers = manifestLayers
rcfg, err := image.RawConfigFile()
if err != nil {
return nil, err
}
d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
if err != nil {
return nil, err
}
image.manifest.Config.Digest = d
image.manifest.Config.Size = sz
return image, nil
return &image{
base: base,
adds: adds,
}, nil
}
// Config mutates the provided v1.Image to have the provided v1.Config
@ -150,22 +88,11 @@ func configFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) {
}
image := &image{
Image: base,
base: base,
manifest: m.DeepCopy(),
configFile: cfg,
digestMap: make(map[v1.Hash]v1.Layer),
}
rcfg, err := image.RawConfigFile()
if err != nil {
return nil, err
}
d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
if err != nil {
return nil, err
}
image.manifest.Config.Digest = d
image.manifest.Config.Size = sz
return image, nil
}
@ -183,16 +110,118 @@ func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) {
}
type image struct {
v1.Image
base v1.Image
adds []Addendum
computed bool
configFile *v1.ConfigFile
manifest *v1.Manifest
diffIDMap map[v1.Hash]v1.Layer
digestMap map[v1.Hash]v1.Layer
}
var _ v1.Image = (*image)(nil)
func (i *image) MediaType() (types.MediaType, error) { return i.base.MediaType() }
func (i *image) compute() error {
// Don't re-compute if already computed.
if i.computed {
return nil
}
var configFile *v1.ConfigFile
if i.configFile != nil {
configFile = i.configFile
} else {
cf, err := i.base.ConfigFile()
if err != nil {
return err
}
configFile = cf.DeepCopy()
}
diffIDs := configFile.RootFS.DiffIDs
history := configFile.History
diffIDMap := make(map[v1.Hash]v1.Layer)
digestMap := make(map[v1.Hash]v1.Layer)
for _, add := range i.adds {
diffID, err := add.Layer.DiffID()
if err != nil {
return err
}
diffIDs = append(diffIDs, diffID)
history = append(history, add.History)
diffIDMap[diffID] = add.Layer
}
m, err := i.base.Manifest()
if err != nil {
return err
}
manifest := m.DeepCopy()
manifestLayers := manifest.Layers
for _, add := range i.adds {
d := v1.Descriptor{
MediaType: types.DockerLayer,
}
var err error
if d.Size, err = add.Layer.Size(); err != nil {
return err
}
if d.Digest, err = add.Layer.Digest(); err != nil {
return err
}
manifestLayers = append(manifestLayers, d)
digestMap[d.Digest] = add.Layer
}
configFile.RootFS.DiffIDs = diffIDs
configFile.History = history
manifest.Layers = manifestLayers
rcfg, err := json.Marshal(configFile)
if err != nil {
return err
}
d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
if err != nil {
return err
}
manifest.Config.Digest = d
manifest.Config.Size = sz
i.configFile = configFile
i.manifest = manifest
i.diffIDMap = diffIDMap
i.digestMap = digestMap
i.computed = true
return nil
}
// Layers returns the ordered collection of filesystem layers that comprise this image.
// The order of the list is oldest/base layer first, and most-recent/top layer last.
func (i *image) Layers() ([]v1.Layer, error) {
if err := i.compute(); err == stream.ErrNotComputed {
// Image contains a streamable layer which has not yet been
// consumed. Just return the layers we have in case the caller
// is going to consume the layers.
layers, err := i.base.Layers()
if err != nil {
return nil, err
}
for _, add := range i.adds {
layers = append(layers, add.Layer)
}
return layers, nil
} else if err != nil {
return nil, err
}
diffIDs, err := partial.DiffIDs(i)
if err != nil {
return nil, err
@ -208,38 +237,51 @@ func (i *image) Layers() ([]v1.Layer, error) {
return ls, nil
}
// BlobSet returns an unordered collection of all the blobs in the image.
func (i *image) BlobSet() (map[v1.Hash]struct{}, error) {
return partial.BlobSet(i)
}
// ConfigName returns the hash of the image's config file.
func (i *image) ConfigName() (v1.Hash, error) {
if err := i.compute(); err != nil {
return v1.Hash{}, err
}
return partial.ConfigName(i)
}
// ConfigFile returns this image's config file.
func (i *image) ConfigFile() (*v1.ConfigFile, error) {
if err := i.compute(); err != nil {
return nil, err
}
return i.configFile, nil
}
// RawConfigFile returns the serialized bytes of ConfigFile()
func (i *image) RawConfigFile() ([]byte, error) {
if err := i.compute(); err != nil {
return nil, err
}
return json.Marshal(i.configFile)
}
// Digest returns the sha256 of this image's manifest.
func (i *image) Digest() (v1.Hash, error) {
if err := i.compute(); err != nil {
return v1.Hash{}, err
}
return partial.Digest(i)
}
// Manifest returns this image's Manifest object.
func (i *image) Manifest() (*v1.Manifest, error) {
if err := i.compute(); err != nil {
return nil, err
}
return i.manifest, nil
}
// RawManifest returns the serialized bytes of Manifest()
func (i *image) RawManifest() ([]byte, error) {
if err := i.compute(); err != nil {
return nil, err
}
return json.Marshal(i.manifest)
}
@ -254,7 +296,7 @@ func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) {
if layer, ok := i.digestMap[h]; ok {
return layer, nil
}
return i.Image.LayerByDigest(h)
return i.base.LayerByDigest(h)
}
// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"
@ -263,7 +305,7 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
if layer, ok := i.diffIDMap[h]; ok {
return layer, nil
}
return i.Image.LayerByDiffID(h)
return i.base.LayerByDiffID(h)
}
func validate(adds []Addendum) error {
@ -468,6 +510,10 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
}
}
if err := tarWriter.Close(); err != nil {
return nil, err
}
b := w.Bytes()
// gzip the contents, then create the layer
opener := func() (io.ReadCloser, error) {


@ -17,10 +17,11 @@ package mutate
import (
"fmt"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
)
// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase.
func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) {
// Verify that oldBase's layers are present in orig, otherwise orig is
// not based on oldBase at all.


@ -17,7 +17,7 @@ package partial
import (
"io"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
@ -91,11 +91,6 @@ type compressedImageExtender struct {
// Assert that our extender type completes the v1.Image interface
var _ v1.Image = (*compressedImageExtender)(nil)
// BlobSet implements v1.Image
func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) {
return BlobSet(i)
}
// Digest implements v1.Image
func (i *compressedImageExtender) Digest() (v1.Hash, error) {
return Digest(i)
@ -125,11 +120,6 @@ func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
// LayerByDigest implements v1.Image
func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
if cfgName, err := i.ConfigName(); err != nil {
return nil, err
} else if cfgName == h {
return ConfigLayer(i)
}
cl, err := i.CompressedImageCore.LayerByDigest(h)
if err != nil {
return nil, err


@ -19,7 +19,7 @@ import (
"io"
"sync"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
@ -112,11 +112,6 @@ type uncompressedImageExtender struct {
// Assert that our extender type completes the v1.Image interface
var _ v1.Image = (*uncompressedImageExtender)(nil)
// BlobSet implements v1.Image
func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) {
return BlobSet(i)
}
// Digest implements v1.Image
func (i *uncompressedImageExtender) Digest() (v1.Hash, error) {
return Digest(i)
@ -220,13 +215,6 @@ func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, err
// LayerByDigest implements v1.Image
func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
// Support returning the ConfigFile when asked for its hash.
if cfgName, err := i.ConfigName(); err != nil {
return nil, err
} else if cfgName == h {
return ConfigLayer(i)
}
diffID, err := BlobToDiffID(i, h)
if err != nil {
return nil, err


@ -19,8 +19,9 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
@ -49,8 +50,6 @@ func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
return h, err
}
// configLayer implements v1.Layer from the raw config bytes.
// This is so that clients (e.g. remote) can access the config as a blob.
type configLayer struct {
hash v1.Hash
content []byte
@ -68,12 +67,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) {
// Uncompressed implements v1.Layer
func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
}
// Compressed implements v1.Layer
func (cl *configLayer) Compressed() (io.ReadCloser, error) {
return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
}
// Size implements v1.Layer
@ -83,6 +82,8 @@ func (cl *configLayer) Size() (int64, error) {
var _ v1.Layer = (*configLayer)(nil)
// ConfigLayer implements v1.Layer from the raw config bytes.
// This is so that clients (e.g. remote) can access the config as a blob.
func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
h, err := ConfigName(i)
if err != nil {
@ -190,20 +191,6 @@ func FSLayers(i WithManifest) ([]v1.Hash, error) {
return fsl, nil
}
// BlobSet is a helper for implementing v1.Image
func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) {
m, err := i.Manifest()
if err != nil {
return nil, err
}
bs := make(map[v1.Hash]struct{})
for _, l := range m.Layers {
bs[l.Digest] = struct{}{}
}
bs[m.Config.Digest] = struct{}{}
return bs, nil
}
// BlobSize is a helper for implementing v1.Image
func BlobSize(i WithManifest, h v1.Hash) (int64, error) {
m, err := i.Manifest()


@ -20,12 +20,12 @@ import (
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
// uncompressedLayer implements partial.UncompressedLayer from raw bytes.
@ -42,7 +42,7 @@ func (ul *uncompressedLayer) DiffID() (v1.Hash, error) {
// Uncompressed implements partial.UncompressedLayer
func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) {
return v1util.NopReadCloser(bytes.NewBuffer(ul.content)), nil
return ioutil.NopCloser(bytes.NewBuffer(ul.content)), nil
}
var _ partial.UncompressedLayer = (*uncompressedLayer)(nil)
@ -56,12 +56,16 @@ func Image(byteSize, layers int64) (v1.Image, error) {
if err := tw.WriteHeader(&tar.Header{
Name: fmt.Sprintf("random_file_%d.txt", i),
Size: byteSize,
Typeflag: tar.TypeRegA,
}); err != nil {
return nil, err
}
if _, err := io.CopyN(tw, rand.Reader, byteSize); err != nil {
return nil, err
}
if err := tw.Close(); err != nil {
return nil, err
}
bts := b.Bytes()
h, _, err := v1.SHA256(bytes.NewReader(bts))
if err != nil {
@ -75,6 +79,9 @@ func Image(byteSize, layers int64) (v1.Image, error) {
cfg := &v1.ConfigFile{}
// Some clients check this.
cfg.RootFS.Type = "layers"
// It is ok that iteration order is random in Go, because this is the random image anyways.
for k := range layerz {
cfg.RootFS.DiffIDs = append(cfg.RootFS.DiffIDs, k)


@ -0,0 +1,106 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package random
import (
"bytes"
"encoding/json"
"fmt"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
)
type randomIndex struct {
images map[v1.Hash]v1.Image
manifest *v1.IndexManifest
}
// Index returns a pseudo-randomly generated ImageIndex with count images, each
// having the given number of layers of size byteSize.
func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
manifest := v1.IndexManifest{
SchemaVersion: 2,
Manifests: []v1.Descriptor{},
}
images := make(map[v1.Hash]v1.Image)
for i := int64(0); i < count; i++ {
img, err := Image(byteSize, layers)
if err != nil {
return nil, err
}
rawManifest, err := img.RawManifest()
if err != nil {
return nil, err
}
digest, size, err := v1.SHA256(bytes.NewReader(rawManifest))
if err != nil {
return nil, err
}
mediaType, err := img.MediaType()
if err != nil {
return nil, err
}
manifest.Manifests = append(manifest.Manifests, v1.Descriptor{
Digest: digest,
Size: size,
MediaType: mediaType,
})
images[digest] = img
}
return &randomIndex{
images: images,
manifest: &manifest,
}, nil
}
func (i *randomIndex) MediaType() (types.MediaType, error) {
return types.OCIImageIndex, nil
}
func (i *randomIndex) Digest() (v1.Hash, error) {
return partial.Digest(i)
}
func (i *randomIndex) IndexManifest() (*v1.IndexManifest, error) {
return i.manifest, nil
}
func (i *randomIndex) RawManifest() ([]byte, error) {
m, err := i.IndexManifest()
if err != nil {
return nil, err
}
return json.Marshal(m)
}
func (i *randomIndex) Image(h v1.Hash) (v1.Image, error) {
if img, ok := i.images[h]; ok {
return img, nil
}
return nil, fmt.Errorf("image not found: %v", h)
}
func (i *randomIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
// This is a single level index (for now?).
return nil, fmt.Errorf("image not found: %v", h)
}


@ -21,11 +21,12 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
@ -34,14 +35,15 @@ import (
// remoteImage accesses an image from a remote registry
type remoteImage struct {
ref name.Reference
client *http.Client
fetcher
manifestLock sync.Mutex // Protects manifest
manifest []byte
configLock sync.Mutex // Protects config
config []byte
mediaType types.MediaType
}
// ImageOption is a functional option for Image.
type ImageOption func(*imageOpener) error
var _ partial.CompressedImageCore = (*remoteImage)(nil)
@ -59,8 +61,10 @@ func (i *imageOpener) Open() (v1.Image, error) {
return nil, err
}
ri := &remoteImage{
ref: i.ref,
client: &http.Client{Transport: tr},
fetcher: fetcher{
Ref: i.ref,
Client: &http.Client{Transport: tr},
},
}
imgCore, err := partial.CompressedToImage(ri)
if err != nil {
@ -91,58 +95,57 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) {
return img.Open()
}
func (r *remoteImage) url(resource, identifier string) url.URL {
// fetcher implements methods for reading from a remote image.
type fetcher struct {
Ref name.Reference
Client *http.Client
}
// url returns a url.Url for the specified path in the context of this remote image reference.
func (f *fetcher) url(resource, identifier string) url.URL {
return url.URL{
Scheme: r.ref.Context().Registry.Scheme(),
Host: r.ref.Context().RegistryStr(),
Path: fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier),
Scheme: f.Ref.Context().Registry.Scheme(),
Host: f.Ref.Context().RegistryStr(),
Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
}
}
func (r *remoteImage) MediaType() (types.MediaType, error) {
// TODO(jonjohnsonjr): Determine this based on response.
return types.DockerManifestSchema2, nil
}
// TODO(jonjohnsonjr): Handle manifest lists.
func (r *remoteImage) RawManifest() ([]byte, error) {
r.manifestLock.Lock()
defer r.manifestLock.Unlock()
if r.manifest != nil {
return r.manifest, nil
}
u := r.url("manifests", r.ref.Identifier())
func (f *fetcher) fetchManifest(acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
u := f.url("manifests", f.Ref.Identifier())
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
return nil, nil, err
}
// TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index.
req.Header.Set("Accept", string(types.DockerManifestSchema2))
resp, err := r.client.Do(req)
accept := []string{}
for _, mt := range acceptable {
accept = append(accept, string(mt))
}
req.Header.Set("Accept", strings.Join(accept, ","))
resp, err := f.Client.Do(req)
if err != nil {
return nil, err
return nil, nil, err
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusOK); err != nil {
return nil, err
if err := transport.CheckError(resp, http.StatusOK); err != nil {
return nil, nil, err
}
manifest, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
return nil, nil, err
}
digest, _, err := v1.SHA256(bytes.NewReader(manifest))
digest, size, err := v1.SHA256(bytes.NewReader(manifest))
if err != nil {
return nil, err
return nil, nil, err
}
// Validate the digest matches what we asked for, if pulling by digest.
if dgst, ok := r.ref.(name.Digest); ok {
if dgst, ok := f.Ref.(name.Digest); ok {
if digest.String() != dgst.DigestStr() {
return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref)
return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
}
} else {
// Do nothing for tags; I give up.
@ -155,6 +158,42 @@ func (r *remoteImage) RawManifest() ([]byte, error) {
// https://github.com/GoogleContainerTools/kaniko/issues/298
}
// Return all this info since we have to calculate it anyway.
desc := v1.Descriptor{
Digest: digest,
Size: size,
MediaType: types.MediaType(resp.Header.Get("Content-Type")),
}
return manifest, &desc, nil
}
func (r *remoteImage) MediaType() (types.MediaType, error) {
if string(r.mediaType) != "" {
return r.mediaType, nil
}
return types.DockerManifestSchema2, nil
}
// TODO(jonjohnsonjr): Handle manifest lists.
func (r *remoteImage) RawManifest() ([]byte, error) {
r.manifestLock.Lock()
defer r.manifestLock.Unlock()
if r.manifest != nil {
return r.manifest, nil
}
// TODO(jonjohnsonjr): Accept manifest list and image index?
acceptable := []types.MediaType{
types.DockerManifestSchema2,
types.OCIManifestSchema1,
}
manifest, desc, err := r.fetchManifest(acceptable)
if err != nil {
return nil, err
}
r.mediaType = desc.MediaType
r.manifest = manifest
return r.manifest, nil
}
@ -202,12 +241,12 @@ func (rl *remoteLayer) Digest() (v1.Hash, error) {
// Compressed implements partial.CompressedLayer
func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
u := rl.ri.url("blobs", rl.digest.String())
resp, err := rl.ri.client.Get(u.String())
resp, err := rl.ri.Client.Get(u.String())
if err != nil {
return nil, err
}
if err := CheckError(resp, http.StatusOK); err != nil {
if err := transport.CheckError(resp, http.StatusOK); err != nil {
resp.Body.Close()
return nil, err
}


@ -0,0 +1,139 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"bytes"
"fmt"
"net/http"
"sync"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
)
// remoteIndex accesses an index from a remote registry
type remoteIndex struct {
fetcher
manifestLock sync.Mutex // Protects manifest
manifest []byte
mediaType types.MediaType
}
// Index provides access to a remote index reference, applying functional options
// to the underlying imageOpener before resolving the reference into a v1.ImageIndex.
func Index(ref name.Reference, options ...ImageOption) (v1.ImageIndex, error) {
i := &imageOpener{
auth: authn.Anonymous,
transport: http.DefaultTransport,
ref: ref,
}
for _, option := range options {
if err := option(i); err != nil {
return nil, err
}
}
tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)})
if err != nil {
return nil, err
}
return &remoteIndex{
fetcher: fetcher{
Ref: i.ref,
Client: &http.Client{Transport: tr},
},
}, nil
}
func (r *remoteIndex) MediaType() (types.MediaType, error) {
if string(r.mediaType) != "" {
return r.mediaType, nil
}
return types.DockerManifestList, nil
}
func (r *remoteIndex) Digest() (v1.Hash, error) {
return partial.Digest(r)
}
func (r *remoteIndex) RawManifest() ([]byte, error) {
r.manifestLock.Lock()
defer r.manifestLock.Unlock()
if r.manifest != nil {
return r.manifest, nil
}
acceptable := []types.MediaType{
types.DockerManifestList,
types.OCIImageIndex,
}
manifest, desc, err := r.fetchManifest(acceptable)
if err != nil {
return nil, err
}
r.mediaType = desc.MediaType
r.manifest = manifest
return r.manifest, nil
}
func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) {
b, err := r.RawManifest()
if err != nil {
return nil, err
}
return v1.ParseIndexManifest(bytes.NewReader(b))
}
func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) {
imgRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation)
if err != nil {
return nil, err
}
ri := &remoteImage{
fetcher: fetcher{
Ref: imgRef,
Client: r.Client,
},
}
imgCore, err := partial.CompressedToImage(ri)
if err != nil {
return imgCore, err
}
// Wrap the v1.Layers returned by this v1.Image in a hint for downstream
// remote.Write calls to facilitate cross-repo "mounting".
return &mountableImage{
Image: imgCore,
Reference: r.Ref,
}, nil
}
func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
idxRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation)
if err != nil {
return nil, err
}
return &remoteIndex{
fetcher: fetcher{
Ref: idxRef,
Client: r.Client,
},
}, nil
}
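A hedged sketch of the new read path for indexes, assuming the remote package as changed here. The reference is purely illustrative; any reference that points at a manifest list or OCI image index would do:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/distroless/base:latest", name.WeakValidation)
	if err != nil {
		panic(err)
	}
	// Defaults to anonymous auth and http.DefaultTransport, per imageOpener.
	idx, err := remote.Index(ref)
	if err != nil {
		panic(err)
	}
	m, err := idx.IndexManifest()
	if err != nil {
		panic(err)
	}
	for _, desc := range m.Manifests {
		fmt.Println(desc.Digest, desc.MediaType)
	}
}
```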


@ -25,12 +25,12 @@ import (
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)
type Tags struct {
type tags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
// TODO(jonjohnsonjr): return []name.Tag?
// List calls /tags/list for the given repository.
func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) {
scopes := []string{repo.Scope(transport.PullScope)}
tr, err := transport.New(repo.Registry, auth, t, scopes)
@ -51,14 +51,14 @@ func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) (
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusOK); err != nil {
if err := transport.CheckError(resp, http.StatusOK); err != nil {
return nil, err
}
tags := Tags{}
if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil {
parsed := tags{}
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
return nil, err
}
return tags.Tags, nil
return parsed.Tags, nil
}
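Unexporting Tags does not change the public List API. A hedged usage sketch; the repository name is illustrative only:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	repo, err := name.NewRepository("gcr.io/distroless/base", name.WeakValidation)
	if err != nil {
		panic(err)
	}
	// List still returns the plain tag strings for the repository.
	tags, err := remote.List(repo, authn.Anonymous, http.DefaultTransport)
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)
}
```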


@ -16,7 +16,7 @@ package remote
import (
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
// MountableLayer wraps a v1.Layer in a shim that enables the layer to be


@ -15,9 +15,8 @@
package transport
import (
"fmt"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
@ -40,6 +39,8 @@ type bearerTransport struct {
// See https://docs.docker.com/registry/spec/auth/token/
service string
scopes []string
// Scheme we should use, determined by ping response.
scheme string
}
var _ http.RoundTripper = (*bearerTransport)(nil)
@ -61,6 +62,8 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
in.Header.Set("Authorization", hdr)
}
in.Header.Set("User-Agent", transportName)
in.URL.Scheme = bt.scheme
return bt.inner.RoundTrip(in)
}
@ -103,6 +106,10 @@ func (bt *bearerTransport) refresh() error {
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusOK); err != nil {
return err
}
content, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err


@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
package transport
import (
"encoding/json"
@ -35,7 +35,7 @@ var _ error = (*Error)(nil)
func (e *Error) Error() string {
switch len(e.Errors) {
case 0:
return "<empty remote.Error response>"
return "<empty transport.Error response>"
case 1:
return e.Errors[0].String()
default:
@ -55,9 +55,13 @@ type Diagnostic struct {
Detail interface{} `json:"detail,omitempty"`
}
// String stringifies the Diagnostic
// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
func (d Diagnostic) String() string {
return fmt.Sprintf("%s: %q", d.Code, d.Message)
msg := fmt.Sprintf("%s: %s", d.Code, d.Message)
if d.Detail != nil {
msg = fmt.Sprintf("%s; %v", msg, d.Detail)
}
return msg
}
// ErrorCode is an enumeration of supported error codes.
@ -83,6 +87,7 @@ const (
UnsupportedErrorCode ErrorCode = "UNSUPPORTED"
)
// CheckError returns a structured error if the response status is not in codes.
func CheckError(resp *http.Response, codes ...int) error {
for _, code := range codes {
if resp.StatusCode == code {


@ -36,6 +36,9 @@ type pingResp struct {
// Following the challenge there are often key/value pairs
// e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
parameters map[string]string
// The registry's scheme to use. Communicates whether we fell back to http.
scheme string
}
func (c challenge) Canonical() challenge {
@ -63,17 +66,32 @@ func parseChallenge(suffix string) map[string]string {
func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
client := http.Client{Transport: t}
url := fmt.Sprintf("%s://%s/v2/", reg.Scheme(), reg.Name())
// This first attempts to use "https" for every request, falling back to http
// if the registry matches our localhost heuristic or if it is intentionally
// set to insecure via name.NewInsecureRegistry.
schemes := []string{"https"}
if reg.Scheme() == "http" {
schemes = append(schemes, "http")
}
var connErr error
for _, scheme := range schemes {
url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
resp, err := client.Get(url)
if err != nil {
return nil, err
connErr = err
// Potentially retry with http.
continue
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK:
// If we get a 200, then no authentication is needed.
return &pingResp{challenge: anonymous}, nil
return &pingResp{
challenge: anonymous,
scheme: scheme,
}, nil
case http.StatusUnauthorized:
wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate"))
if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
@ -81,13 +99,17 @@ func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
return &pingResp{
challenge: challenge(parts[0]).Canonical(),
parameters: parseChallenge(parts[1]),
scheme: scheme,
}, nil
}
// Otherwise, just return the challenge without parameters.
return &pingResp{
challenge: challenge(wac).Canonical(),
scheme: scheme,
}, nil
default:
return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status)
}
}
return nil, connErr
}


@ -73,6 +73,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope
registry: reg,
service: service,
scopes: scopes,
scheme: pr.scheme,
}
if err := bt.refresh(); err != nil {
return nil, err


@ -18,16 +18,27 @@ import (
"bytes"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/types"
"golang.org/x/sync/errgroup"
)
type manifest interface {
RawManifest() ([]byte, error)
MediaType() (types.MediaType, error)
Digest() (v1.Hash, error)
}
// Write pushes the provided img to the specified image reference.
func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error {
ls, err := img.Layers()
@ -43,46 +54,72 @@ func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.Ro
w := writer{
ref: ref,
client: &http.Client{Transport: tr},
img: img,
}
bs, err := img.BlobSet()
// Upload individual layers in goroutines and collect any errors.
// If we can dedupe by the layer digest, try to do so. If the layer is
// a stream.Layer, we can't dedupe and might re-upload.
var g errgroup.Group
uploaded := map[v1.Hash]bool{}
for _, l := range ls {
l := l
if _, ok := l.(*stream.Layer); !ok {
h, err := l.Digest()
if err != nil {
return err
}
// Spin up go routines to publish each of the members of BlobSet(),
// and use an error channel to collect their results.
errCh := make(chan error)
defer close(errCh)
for h := range bs {
go func(h v1.Hash) {
errCh <- w.uploadOne(h)
}(h)
// If we can determine the layer's digest ahead of
// time, use it to dedupe uploads.
if uploaded[h] {
continue // Already uploading.
}
uploaded[h] = true
}
// Now wait for all of the blob uploads to complete.
var errors []error
for _ = range bs {
if err := <-errCh; err != nil {
errors = append(errors, err)
g.Go(func() error {
return w.uploadOne(l)
})
}
if l, err := partial.ConfigLayer(img); err == stream.ErrNotComputed {
// We can't read the ConfigLayer, because of streaming layers, since the
// config hasn't been calculated yet.
if err := g.Wait(); err != nil {
return err
}
// Now that all the layers are uploaded, upload the config file blob.
l, err := partial.ConfigLayer(img)
if err != nil {
return err
}
if err := w.uploadOne(l); err != nil {
return err
}
} else if err != nil {
// This is an actual error, not a streaming error, just return it.
return err
} else {
// We *can* read the ConfigLayer, so upload it concurrently with the layers.
g.Go(func() error {
return w.uploadOne(l)
})
// Wait for the layers + config.
if err := g.Wait(); err != nil {
return err
}
if len(errors) > 0 {
// Return the first error we encountered.
return errors[0]
}
// With all of the constituent elements uploaded, upload the manifest
// to commit the image.
return w.commitImage()
return w.commitImage(img)
}
// writer writes the elements of an image to a remote image reference.
type writer struct {
ref name.Reference
client *http.Client
img v1.Image
}
// url returns a url.Url for the specified path in the context of this remote image reference.
@ -110,11 +147,11 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) {
return resp.Request.URL.ResolveReference(u).String(), nil
}
// checkExisting checks if a blob exists already in the repository by making a
// checkExistingBlob checks if a blob exists already in the repository by making a
// HEAD request to the blob store API. GCR performs an existence check on the
// initiation if "mount" is specified, even if no "from" sources are specified.
// However, this is not broadly applicable to all registries, e.g. ECR.
func (w *writer) checkExisting(h v1.Hash) (bool, error) {
func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String()))
resp, err := w.client.Head(u.String())
@ -123,7 +160,31 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) {
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
return false, err
}
return resp.StatusCode == http.StatusOK, nil
}
// checkExistingManifest checks if a manifest exists already in the repository
// by making a HEAD request to the manifest API.
func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) {
u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), h.String()))
req, err := http.NewRequest(http.MethodHead, u.String(), nil)
if err != nil {
return false, err
}
req.Header.Set("Accept", string(mt))
resp, err := w.client.Do(req)
if err != nil {
return false, err
}
defer resp.Body.Close()
if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
return false, err
}
@ -136,20 +197,14 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) {
// On success, the layer was either mounted (nothing more to do) or a blob
// upload was initiated and the body of that blob should be sent to the returned
// location.
func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err error) {
func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) {
u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr()))
uv := url.Values{
"mount": []string{h.String()},
}
l, err := w.img.LayerByDigest(h)
if err != nil {
return "", false, err
}
if ml, ok := l.(*MountableLayer); ok {
if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() {
uv["from"] = []string{ml.Reference.Context().RepositoryStr()}
uv := url.Values{}
if mount != "" {
uv["mount"] = []string{mount}
}
if from != "" {
uv["from"] = []string{from}
}
u.RawQuery = uv.Encode()
@ -160,7 +215,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
return "", false, err
}
@ -181,15 +236,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e
// streamBlob streams the contents of the blob to the specified location.
// On failure, this will return an error. On success, this will return the location
// header indicating how to commit the streamed blob.
func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation string, err error) {
l, err := w.img.LayerByDigest(h)
if err != nil {
return "", err
}
blob, err := l.Compressed()
if err != nil {
return "", err
}
func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) {
defer blob.Close()
req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
@ -203,7 +250,7 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
return "", err
}
@ -212,14 +259,15 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st
return w.nextLocation(resp)
}
// commitBlob commits this blob by sending a PUT to the location returned from streaming the blob.
func (w *writer) commitBlob(h v1.Hash, location string) (err error) {
// commitBlob commits this blob by sending a PUT to the location returned from
// streaming the blob.
func (w *writer) commitBlob(location, digest string) error {
u, err := url.Parse(location)
if err != nil {
return err
}
v := u.Query()
v.Set("digest", h.String())
v.Set("digest", digest)
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPut, u.String(), nil)
@ -233,12 +281,25 @@ func (w *writer) commitBlob(h v1.Hash, location string) (err error) {
}
defer resp.Body.Close()
return CheckError(resp, http.StatusCreated)
return transport.CheckError(resp, http.StatusCreated)
}
// uploadOne performs a complete upload of a single layer.
func (w *writer) uploadOne(h v1.Hash) error {
existing, err := w.checkExisting(h)
func (w *writer) uploadOne(l v1.Layer) error {
var from, mount, digest string
if _, ok := l.(*stream.Layer); !ok {
// Layer isn't streamable, we should take advantage of that to
// skip uploading if possible.
// By sending ?digest= in the request, we'll also check that
// our computed digest matches the one computed by the
// registry.
h, err := l.Digest()
if err != nil {
return err
}
digest = h.String()
existing, err := w.checkExistingBlob(h)
if err != nil {
return err
}
@ -247,33 +308,55 @@ func (w *writer) uploadOne(h v1.Hash) error {
return nil
}
location, mounted, err := w.initiateUpload(h)
mount = h.String()
}
if ml, ok := l.(*MountableLayer); ok {
if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() {
from = ml.Reference.Context().RepositoryStr()
}
}
location, mounted, err := w.initiateUpload(from, mount)
if err != nil {
return err
} else if mounted {
log.Printf("mounted blob: %v", h)
h, err := l.Digest()
if err != nil {
return err
}
log.Printf("mounted blob: %s", h.String())
return nil
}
location, err = w.streamBlob(h, location)
blob, err := l.Compressed()
if err != nil {
return err
}
location, err = w.streamBlob(blob, location)
if err != nil {
return err
}
if err := w.commitBlob(h, location); err != nil {
h, err := l.Digest()
if err != nil {
return err
}
log.Printf("pushed blob %v", h)
digest = h.String()
if err := w.commitBlob(location, digest); err != nil {
return err
}
log.Printf("pushed blob: %s", digest)
return nil
}
// commitImage does a PUT of the image's manifest.
func (w *writer) commitImage() error {
raw, err := w.img.RawManifest()
func (w *writer) commitImage(man manifest) error {
raw, err := man.RawManifest()
if err != nil {
return err
}
mt, err := w.img.MediaType()
mt, err := man.MediaType()
if err != nil {
return err
}
@ -293,11 +376,11 @@ func (w *writer) commitImage() error {
}
defer resp.Body.Close()
if err := CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
return err
}
digest, err := w.img.Digest()
digest, err := man.Digest()
if err != nil {
return err
}
@ -324,11 +407,68 @@ func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string {
// Push scope should be the first element because a few registries just look at the first scope to determine access.
scopes = append(scopes, ref.Scope(transport.PushScope))
for scope, _ := range scopeSet {
for scope := range scopeSet {
scopes = append(scopes, scope)
}
return scopes
}
// TODO(mattmoor): WriteIndex
// WriteIndex pushes the provided ImageIndex to the specified image reference.
// WriteIndex will attempt to push all of the referenced manifests before
// attempting to push the ImageIndex, to retain referential integrity.
func WriteIndex(ref name.Reference, ii v1.ImageIndex, auth authn.Authenticator, t http.RoundTripper) error {
index, err := ii.IndexManifest()
if err != nil {
return err
}
scopes := []string{ref.Scope(transport.PushScope)}
tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
if err != nil {
return err
}
w := writer{
ref: ref,
client: &http.Client{Transport: tr},
}
for _, desc := range index.Manifests {
ref, err := name.ParseReference(fmt.Sprintf("%s@%s", ref.Context(), desc.Digest), name.StrictValidation)
if err != nil {
return err
}
exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType)
if err != nil {
return err
}
if exists {
log.Printf("existing manifest: %v", desc.Digest)
continue
}
switch desc.MediaType {
case types.OCIImageIndex, types.DockerManifestList:
ii, err := ii.ImageIndex(desc.Digest)
if err != nil {
return err
}
if err := WriteIndex(ref, ii, auth, t); err != nil {
return err
}
case types.OCIManifestSchema1, types.DockerManifestSchema2:
img, err := ii.Image(desc.Digest)
if err != nil {
return err
}
if err := Write(ref, img, auth, t); err != nil {
return err
}
}
}
// With all of the constituent elements uploaded, upload the manifest
// to commit the image.
return w.commitImage(ii)
}
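A hedged sketch of pushing an index with the new WriteIndex. The destination is a placeholder and random.Index is only used to have something to push:

```go
package main

import (
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Replace with a registry you can actually push to.
	ref, err := name.ParseReference("registry.example.com/test/index:latest", name.WeakValidation)
	if err != nil {
		panic(err)
	}
	// Two pseudo-random single-layer images of 1 KiB each, wrapped in an index.
	idx, err := random.Index(1024, 1, 2)
	if err != nil {
		panic(err)
	}
	// WriteIndex pushes each referenced manifest first, then the index itself.
	if err := remote.WriteIndex(ref, idx, authn.Anonymous, http.DefaultTransport); err != nil {
		panic(err)
	}
}
```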


@ -0,0 +1,194 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stream
import (
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"errors"
"hash"
"io"
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
var (
// ErrNotComputed is returned when the requested value is not yet
// computed because the stream has not been consumed yet.
ErrNotComputed = errors.New("value not computed until stream is consumed")
// ErrConsumed is returned by Compressed when the underlying stream has
// already been consumed and closed.
ErrConsumed = errors.New("stream was already consumed")
)
// Layer is a streaming implementation of v1.Layer.
type Layer struct {
blob io.ReadCloser
consumed bool
mu sync.Mutex
digest, diffID *v1.Hash
size int64
}
var _ v1.Layer = (*Layer)(nil)
// NewLayer creates a Layer from an io.ReadCloser.
func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} }
// Digest implements v1.Layer.
func (l *Layer) Digest() (v1.Hash, error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.digest == nil {
return v1.Hash{}, ErrNotComputed
}
return *l.digest, nil
}
// DiffID implements v1.Layer.
func (l *Layer) DiffID() (v1.Hash, error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.diffID == nil {
return v1.Hash{}, ErrNotComputed
}
return *l.diffID, nil
}
// Size implements v1.Layer.
func (l *Layer) Size() (int64, error) {
l.mu.Lock()
defer l.mu.Unlock()
if l.size == 0 {
return 0, ErrNotComputed
}
return l.size, nil
}
// Uncompressed implements v1.Layer.
func (l *Layer) Uncompressed() (io.ReadCloser, error) {
return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented")
}
// Compressed implements v1.Layer.
func (l *Layer) Compressed() (io.ReadCloser, error) {
if l.consumed {
return nil, ErrConsumed
}
return newCompressedReader(l)
}
type compressedReader struct {
closer io.Closer // original blob's Closer.
h, zh hash.Hash // h hashes the uncompressed stream (diffID); zh hashes the compressed stream (digest).
pr io.Reader
count *countWriter
l *Layer // stream.Layer to update upon Close.
}
func newCompressedReader(l *Layer) (*compressedReader, error) {
h := sha256.New()
zh := sha256.New()
count := &countWriter{}
// The gzip.Writer fans out, via io.MultiWriter, to the pipe (the output
// stream), a hasher that captures the compressed digest, and a
// countWriter that captures the compressed size.
pr, pw := io.Pipe()
zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed)
if err != nil {
return nil, err
}
cr := &compressedReader{
closer: newMultiCloser(zw, l.blob),
pr: pr,
h: h,
zh: zh,
count: count,
l: l,
}
go func() {
if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil {
pw.CloseWithError(err)
return
}
// Now close the compressed reader, to flush the gzip stream
// and calculate digest/diffID/size. This will cause pr to
// return EOF which will cause readers of the Compressed stream
// to finish reading.
pw.CloseWithError(cr.Close())
}()
return cr, nil
}
func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
func (cr *compressedReader) Close() error {
cr.l.mu.Lock()
defer cr.l.mu.Unlock()
// Close the inner ReadCloser.
if err := cr.closer.Close(); err != nil {
return err
}
diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
if err != nil {
return err
}
cr.l.diffID = &diffID
digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil)))
if err != nil {
return err
}
cr.l.digest = &digest
cr.l.size = cr.count.n
cr.l.consumed = true
return nil
}
// countWriter counts bytes written to it.
type countWriter struct{ n int64 }
func (c *countWriter) Write(p []byte) (int, error) {
c.n += int64(len(p))
return len(p), nil
}
// multiCloser is a Closer that collects multiple Closers and Closes them in order.
type multiCloser []io.Closer
var _ io.Closer = (multiCloser)(nil)
func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
func (m multiCloser) Close() error {
for _, c := range m {
if err := c.Close(); err != nil {
return err
}
}
return nil
}
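
A rough sketch of the stream.Layer contract above: Digest, DiffID and Size report ErrNotComputed until the Compressed stream has been drained, and a later Compressed call reports ErrConsumed. The in-memory blob below is a placeholder for a real layer stream:

// Sketch: the stream.Layer lifecycle.
package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "strings"

    "github.com/google/go-containerregistry/pkg/v1/stream"
)

func main() {
    l := stream.NewLayer(ioutil.NopCloser(strings.NewReader("some layer contents")))

    // Not consumed yet: metadata is unknown.
    if _, err := l.Digest(); err == stream.ErrNotComputed {
        fmt.Println("digest not computed yet")
    }

    rc, err := l.Compressed()
    if err != nil {
        log.Fatal(err)
    }
    // Draining the compressed stream computes digest, diffID and size.
    if _, err := io.Copy(ioutil.Discard, rc); err != nil {
        log.Fatal(err)
    }
    rc.Close()

    d, _ := l.Digest()
    sz, _ := l.Size()
    fmt.Println(d, sz)

    // The stream can only be consumed once.
    if _, err := l.Compressed(); err == stream.ErrConsumed {
        fmt.Println("stream already consumed")
    }
}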

View File

@ -26,7 +26,7 @@ import (
"sync"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
@ -54,6 +54,7 @@ type compressedImage struct {
var _ partial.UncompressedImageCore = (*uncompressedImage)(nil)
var _ partial.CompressedImageCore = (*compressedImage)(nil)
// Opener is a thunk for opening a tar file.
type Opener func() (io.ReadCloser, error)
func pathOpener(path string) Opener {
@ -62,6 +63,7 @@ func pathOpener(path string) Opener {
}
}
// ImageFromPath returns a v1.Image from a tarball located at path.
func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) {
return Image(pathOpener(path), tag)
}
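
For reference, a small sketch of ImageFromPath in use; the tarball path and tag are placeholders:

// Sketch: load a v1.Image from a tarball on disk.
package main

import (
    "fmt"
    "log"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
    tag, err := name.NewTag("example.com/repo/app:latest", name.WeakValidation)
    if err != nil {
        log.Fatal(err)
    }
    img, err := tarball.ImageFromPath("app.tar", &tag)
    if err != nil {
        log.Fatal(err)
    }
    digest, err := img.Digest()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(digest)
}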

View File

@ -20,7 +20,7 @@ import (
"io/ioutil"
"os"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)

View File

@ -23,7 +23,7 @@ import (
"os"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
// WriteToFile writes the image in compressed format to a tarball on disk.

View File

@ -1,40 +0,0 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1util
import (
"io"
)
func nop() error {
return nil
}
// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &writeAndCloser{
Writer: w,
CloseFunc: nop,
}
}
// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing.
// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity.
func NopReadCloser(r io.Reader) io.ReadCloser {
return &readAndCloser{
Reader: r,
CloseFunc: nop,
}
}

View File

@ -20,7 +20,7 @@ import (
"hash"
"io"
"github.com/google/go-containerregistry/pkg/v1"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
type verifyReader struct {

View File

@ -70,56 +70,14 @@ func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
}, nil
}
// GzipWriteCloser returns an io.WriteCloser to which uncompressed data may be
// written, and the compressed data is then written to the provided
// io.WriteCloser.
func GzipWriteCloser(w io.WriteCloser) io.WriteCloser {
gw := gzip.NewWriter(w)
return &writeAndCloser{
Writer: gw,
CloseFunc: func() error {
if err := gw.Close(); err != nil {
return err
}
return w.Close()
},
}
}
// gunzipWriteCloser implements io.WriteCloser
// It is used to implement GunzipWriteCloser.
type gunzipWriteCloser struct {
*bytes.Buffer
writer io.WriteCloser
}
// Close implements io.WriteCloser
func (gwc *gunzipWriteCloser) Close() error {
// TODO(mattmoor): How to avoid buffering this whole thing into memory?
gr, err := gzip.NewReader(gwc.Buffer)
if err != nil {
return err
}
if _, err := io.Copy(gwc.writer, gr); err != nil {
return err
}
return gwc.writer.Close()
}
// GunzipWriteCloser returns an io.WriteCloser to which compressed data may be
// written, and the uncompressed data is then written to the provided
// io.WriteCloser.
func GunzipWriteCloser(w io.WriteCloser) (io.WriteCloser, error) {
return &gunzipWriteCloser{
Buffer: bytes.NewBuffer(nil),
writer: w,
}, nil
}
// IsGzipped detects whether the input stream is compressed.
func IsGzipped(r io.Reader) (bool, error) {
magicHeader := make([]byte, 2)
if _, err := r.Read(magicHeader); err != nil {
n, err := r.Read(magicHeader)
if n == 0 && err == io.EOF {
return false, nil
}
if err != nil {
return false, err
}
return bytes.Equal(magicHeader, gzipMagicHeader), nil
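
A short sketch of the behavioral change above: an empty stream now reports false rather than surfacing io.EOF as an error. The inputs below are placeholders:

// Sketch: IsGzipped on empty, gzipped, and plain inputs.
package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/google/go-containerregistry/pkg/v1/v1util"
)

func main() {
    for _, r := range []*bytes.Reader{
        bytes.NewReader(nil),                      // empty: false, nil (previously an io.EOF error)
        bytes.NewReader([]byte{0x1f, 0x8b, 0x08}), // gzip magic header: true, nil
        bytes.NewReader([]byte("plain text")),     // not gzipped: false, nil
    } {
        ok, err := v1util.IsGzipped(r)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ok)
    }
}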

View File

@ -20,6 +20,7 @@ import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
@ -178,21 +179,10 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
return &roundTripper{a, rt}
}
getCert := c.TLS.GetCert
c.TLS.GetCert = func() (*tls.Certificate, error) {
// If previous GetCert is present and returns a valid non-nil
// certificate, use that. Otherwise use cert from exec plugin.
if getCert != nil {
cert, err := getCert()
if err != nil {
return nil, err
}
if cert != nil {
return cert, nil
}
}
return a.cert()
if c.TLS.GetCert != nil {
return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
}
c.TLS.GetCert = a.cert
var dial func(ctx context.Context, network, addr string) (net.Conn, error)
if c.Dial != nil {

View File

@ -129,7 +129,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex
}
for key, values := range extra {
for _, value := range values {
req.Header.Add("X-Remote-Extra-"+key, value)
req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value)
}
}
}
@ -246,7 +246,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons
}
for k, vv := range rt.impersonate.Extra {
for _, v := range vv {
req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v)
req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v)
}
}
@ -422,3 +422,110 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
return rt.delegatedRoundTripper
}
func legalHeaderByte(b byte) bool {
return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b]
}
func shouldEscape(b byte) bool {
// url.PathUnescape() returns an error if any '%' is not followed by two
// hexadecimal digits, so we'll intentionally encode it.
return !legalHeaderByte(b) || b == '%'
}
func headerKeyEscape(key string) string {
buf := strings.Builder{}
for i := 0; i < len(key); i++ {
b := key[i]
if shouldEscape(b) {
// %-encode bytes that should be escaped:
// https://tools.ietf.org/html/rfc3986#section-2.1
fmt.Fprintf(&buf, "%%%02X", b)
continue
}
buf.WriteByte(b)
}
return buf.String()
}
// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable.
// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
var legalHeaderKeyBytes = [127]bool{
'%': true,
'!': true,
'#': true,
'$': true,
'&': true,
'\'': true,
'*': true,
'+': true,
'-': true,
'.': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'W': true,
'V': true,
'X': true,
'Y': true,
'Z': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'|': true,
'~': true,
}
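
Since headerKeyEscape is unexported, here is a standalone sketch of the same escaping rule for illustration only; the helper name and sample key are made up:

// Sketch: percent-encode any byte that is not an RFC 7230 token
// character, plus '%' itself, mirroring shouldEscape/headerKeyEscape above.
package main

import (
    "fmt"
    "strings"
)

func escapeHeaderKey(key string) string {
    const tokenExtras = "!#$&'*+-.^_`|~"
    var buf strings.Builder
    for i := 0; i < len(key); i++ {
        b := key[i]
        isToken := (b >= '0' && b <= '9') ||
            (b >= 'a' && b <= 'z') ||
            (b >= 'A' && b <= 'Z') ||
            strings.IndexByte(tokenExtras, b) >= 0
        if !isToken { // '%' is not in tokenExtras, so it is encoded too
            fmt.Fprintf(&buf, "%%%02X", b)
            continue
        }
        buf.WriteByte(b)
    }
    return buf.String()
}

func main() {
    // "extra scopes%" -> "extra%20scopes%25": the space and '%' are encoded.
    fmt.Println(escapeHeaderKey("extra scopes%"))
}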