Update ggcr to pick up estargz and caching option (#1527)

Matt Moore 2020-12-21 10:09:21 -08:00 committed by GitHub
parent b04399eeac
commit 1ad4295462
457 changed files with 24712 additions and 9498 deletions

go.mod

@@ -3,31 +3,36 @@ module github.com/GoogleContainerTools/kaniko
go 1.14
replace (
-github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.3+incompatible
+github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible
github.com/containerd/containerd v1.4.0-0.20191014053712-acdcf13d5eaf => github.com/containerd/containerd v0.0.0-20191014053712-acdcf13d5eaf
github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c => github.com/docker/docker v0.0.0-20190319215453-e7b5f7dbe98c
github.com/tonistiigi/fsutil v0.0.0-20190819224149-3d2716dd0a4d => github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1
)
require (
cloud.google.com/go v0.57.0 // indirect
cloud.google.com/go/storage v1.8.0
github.com/Azure/azure-pipeline-go v0.2.2 // indirect
github.com/Azure/azure-storage-blob-go v0.8.0
github.com/aws/aws-sdk-go v1.31.6
github.com/coreos/etcd v3.3.13+incompatible // indirect
github.com/docker/docker v1.14.0-0.20190319215453-e7b5f7dbe98c
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 // indirect
github.com/docker/swarmkit v1.12.1-0.20180726190244-7567d47988d8 // indirect
github.com/evanphx/json-patch v4.2.0+incompatible // indirect
github.com/genuinetools/bpfd v0.0.2-0.20190525234658-c12d8cd9aac8
github.com/go-git/go-billy/v5 v5.0.0
github.com/go-git/go-git/v5 v5.1.0
github.com/golang/mock v1.4.3
github.com/google/go-cmp v0.4.1
-github.com/google/go-containerregistry v0.1.2-0.20200804170047-b0d31a182cf0
+github.com/google/go-containerregistry v0.2.2-0.20201217235130-8b4c3b5b21a3
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible // indirect
github.com/hashicorp/go-memdb v0.0.0-20180223233045-1289e7fffe71 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/karrick/godirwalk v1.7.7
github.com/mattn/go-ieproxy v0.0.1 // indirect
github.com/mattn/go-shellwords v1.0.10 // indirect
github.com/minio/highwayhash v1.0.0
github.com/moby/buildkit v0.0.0-20191111154543-00bfbab0390c
github.com/opencontainers/runtime-spec v1.0.1 // indirect
@@ -41,7 +46,9 @@ require (
github.com/spf13/pflag v1.0.5
github.com/tonistiigi/fsutil v0.0.0-20191018213012-0f039a052ca1 // indirect
github.com/vbatts/tar-split v0.10.2 // indirect
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2
+golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
google.golang.org/api v0.25.0 // indirect
honnef.co/go/tools v0.0.1-2020.1.4 // indirect
)

go.sum

File diff suppressed because it is too large.


@@ -24,6 +24,7 @@ go_test(
"integration_with_stdin_test.go",
"k8s_test.go",
],
data = glob(["testdata/**"]),
embed = [":integration"],
tags = ["manual"],
deps = [


@@ -17,6 +17,7 @@ limitations under the License.
package creds
import (
"context"
"sync"
"github.com/genuinetools/bpfd/proc"
@@ -37,7 +38,7 @@ func GetKeychain() authn.Keychain {
// Add the Kubernetes keychain if we're on Kubernetes
if proc.GetContainerRuntime(0, 0) == proc.RuntimeKubernetes {
-k8sc, err := k8schain.NewNoClient()
+k8sc, err := k8schain.NewNoClient(context.Background())
if err != nil {
logrus.Warnf("Error setting up k8schain. Using default keychain %s", err)
return
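
The only call-site change kaniko needs here is threading a context into k8schain; the new "context" import above exists for exactly this. A minimal standalone sketch of the updated signature, assuming this ggcr revision's API (kaniko itself makes the call inside its existing keychain setup):

```go
package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/authn/k8schain"
)

func main() {
	// NewNoClient now takes a context; kaniko passes context.Background()
	// because keychain setup happens once at startup.
	kc, err := k8schain.NewNoClient(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	_ = kc // use as an authn.Keychain when resolving registry credentials
}
```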


@@ -472,7 +472,7 @@ func (s *stageBuilder) saveSnapshotToLayer(tarPath string) (v1.Layer, error) {
return nil, nil
}
-layer, err := tarball.LayerFromFile(tarPath)
+layer, err := tarball.LayerFromFile(tarPath, tarball.WithCompressedCaching)
if err != nil {
return nil, err
}


@@ -290,7 +290,7 @@ func writeImageOutputs(image v1.Image, destRefs []name.Tag) error {
// pushLayerToCache pushes layer (tagged with cacheKey) to opts.Cache
// if opts.Cache doesn't exist, infer the cache from the given destination
func pushLayerToCache(opts *config.KanikoOptions, cacheKey string, tarPath string, createdBy string) error {
-layer, err := tarball.LayerFromFile(tarPath)
+layer, err := tarball.LayerFromFile(tarPath, tarball.WithCompressedCaching)
if err != nil {
return err
}
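
Both LayerFromFile call sites (the snapshot layer above and the cache push here) gain the same option. tarball.WithCompressedCaching memoizes the layer's compressed bytes, so the repeated Digest()/Size() calls between cache probing and the final push don't recompress the tarball each time; the trade-off is holding the compressed layer in memory. The estargz half of the title needs no call-site change in this diff — as far as we can tell (stated as an assumption; it does not appear in this commit), ggcr at the time gated its experimental estargz layer support behind the GGCR_EXPERIMENT_ESTARGZ=1 environment variable. A minimal sketch of the caching option, assuming a local layer.tar:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	// WithCompressedCaching trades memory for speed: the compressed bytes are
	// computed once and reused for every Digest()/Size()/Compressed() call.
	layer, err := tarball.LayerFromFile("layer.tar", tarball.WithCompressedCaching)
	if err != nil {
		log.Fatal(err)
	}
	digest, err := layer.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer digest:", digest)
}
```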

vendor/github.com/Azure/go-autorest/.gitignore (generated, vendored, new file)

@@ -0,0 +1,32 @@
# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.DS_Store
.idea/
.vscode/
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# go-autorest specific
vendor/
autorest/azure/example/example

vendor/github.com/Azure/go-autorest/CHANGELOG.md (generated, vendored, new file)

File diff suppressed because it is too large.

vendor/github.com/Azure/go-autorest/GNUmakefile (generated, vendored, new file)

@@ -0,0 +1,23 @@
DIR?=./autorest/
default: build
build: fmt
go install $(DIR)
test:
go test $(DIR) || exit 1
vet:
@echo "go vet ."
@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
exit 1; \
fi
fmt:
gofmt -w $(DIR)
.PHONY: build test vet fmt

vendor/github.com/Azure/go-autorest/Gopkg.lock (generated, vendored, new file)

@@ -0,0 +1,324 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e"
name = "contrib.go.opencensus.io/exporter/ocagent"
packages = ["."]
pruneopts = "UT"
revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea"
version = "v0.6.0"
[[projects]]
digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20"
name = "github.com/census-instrumentation/opencensus-proto"
packages = [
"gen-go/agent/common/v1",
"gen-go/agent/metrics/v1",
"gen-go/agent/trace/v1",
"gen-go/metrics/v1",
"gen-go/resource/v1",
"gen-go/trace/v1",
]
pruneopts = "UT"
revision = "d89fa54de508111353cb0b06403c00569be780d8"
version = "v0.2.1"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = "UT"
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55"
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
pruneopts = "UT"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965"
name = "github.com/dimchansky/utfbom"
packages = ["."]
pruneopts = "UT"
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
name = "github.com/golang/groupcache"
packages = ["lru"]
pruneopts = "UT"
revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"
[[projects]]
digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa"
name = "github.com/golang/protobuf"
packages = [
"descriptor",
"jsonpb",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/struct",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = "UT"
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
version = "v1.3.2"
[[projects]]
digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806"
name = "github.com/grpc-ecosystem/grpc-gateway"
packages = [
"internal",
"runtime",
"utilities",
]
pruneopts = "UT"
revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009"
version = "v1.12.1"
[[projects]]
digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "UT"
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"
[[projects]]
digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = "UT"
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
]
pruneopts = "UT"
revision = "221dbe5ed46703ee255b1da0dec05086f5035f62"
version = "v1.4.0"
[[projects]]
digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"metric/metricdata",
"metric/metricproducer",
"plugin/ocgrpc",
"plugin/ochttp",
"plugin/ochttp/propagation/b3",
"plugin/ochttp/propagation/tracecontext",
"resource",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
"trace/tracestate",
]
pruneopts = "UT"
revision = "aad2c527c5defcf89b5afab7f37274304195a6b2"
version = "v0.22.2"
[[projects]]
branch = "master"
digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae"
name = "golang.org/x/crypto"
packages = [
"pkcs12",
"pkcs12/internal/rc2",
]
pruneopts = "UT"
revision = "e9b2fee46413994441b28dfca259d911d963dfed"
[[projects]]
branch = "master"
digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43"
name = "golang.org/x/lint"
packages = [
".",
"golint",
]
pruneopts = "UT"
revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448"
[[projects]]
branch = "master"
digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910"
name = "golang.org/x/net"
packages = [
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "UT"
revision = "1ddd1de85cb0337b623b740a609d35817d516a8d"
[[projects]]
branch = "master"
digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b"
name = "golang.org/x/sync"
packages = ["semaphore"]
pruneopts = "UT"
revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb"
[[projects]]
branch = "master"
digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945"
[[projects]]
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/language",
"internal/language/compact",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"go/gcexportdata",
"go/internal/gcimporter",
"go/types/typeutil",
]
pruneopts = "UT"
revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42"
[[projects]]
digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877"
name = "google.golang.org/api"
packages = ["support/bundler"]
pruneopts = "UT"
revision = "8a410c21381766a810817fd6200fce8838ecb277"
version = "v0.14.0"
[[projects]]
branch = "master"
digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/httpbody",
"googleapis/rpc/status",
"protobuf/field_mask",
]
pruneopts = "UT"
revision = "51378566eb590fa106d1025ea12835a4416dda84"
[[projects]]
digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301"
name = "google.golang.org/grpc"
packages = [
".",
"backoff",
"balancer",
"balancer/base",
"balancer/roundrobin",
"binarylog/grpc_binarylog_v1",
"codes",
"connectivity",
"credentials",
"credentials/internal",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/balancerload",
"internal/binarylog",
"internal/buffer",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/grpcsync",
"internal/resolver/dns",
"internal/resolver/passthrough",
"internal/syscall",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"serviceconfig",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514"
version = "v1.25.1"
[[projects]]
digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = "UT"
revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce"
version = "v2.2.7"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"contrib.go.opencensus.io/exporter/ocagent",
"github.com/dgrijalva/jwt-go",
"github.com/dimchansky/utfbom",
"github.com/mitchellh/go-homedir",
"github.com/stretchr/testify/require",
"go.opencensus.io/plugin/ochttp",
"go.opencensus.io/plugin/ochttp/propagation/tracecontext",
"go.opencensus.io/stats/view",
"go.opencensus.io/trace",
"golang.org/x/crypto/pkcs12",
"golang.org/x/lint/golint",
]
solver-name = "gps-cdcl"
solver-version = 1

vendor/github.com/Azure/go-autorest/Gopkg.toml (generated, vendored, new file)

@@ -0,0 +1,59 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
required = ["golang.org/x/lint/golint"]
[prune]
go-tests = true
unused-packages = true
[[constraint]]
name = "contrib.go.opencensus.io/exporter/ocagent"
version = "0.6.0"
[[constraint]]
name = "github.com/dgrijalva/jwt-go"
version = "3.2.0"
[[constraint]]
name = "github.com/dimchansky/utfbom"
version = "1.1.0"
[[constraint]]
name = "github.com/mitchellh/go-homedir"
version = "1.1.0"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.3.0"
[[constraint]]
name = "go.opencensus.io"
version = "0.22.0"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"

vendor/github.com/Azure/go-autorest/LICENSE (generated, vendored, new file)

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2015 Microsoft Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/Azure/go-autorest/README.md (generated, vendored, new file)

@@ -0,0 +1,165 @@
# go-autorest
[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest)
[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
An authentication client tested with Azure Active Directory (AAD) is also
provided in this repo in the package
`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
is maintained only as part of the Azure Go SDK and is not related to other
"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
## Overview
Package go-autorest implements an HTTP request pipeline suitable for use across
multiple goroutines and provides the shared routines used by packages generated
by [Autorest](https://github.com/Azure/autorest.go).
The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
and Responding. A typical pattern is:
```go
req, err := Prepare(&http.Request{},
token.WithAuthorization())
resp, err := Send(req,
WithLogging(logger),
DoErrorIfStatusCode(http.StatusInternalServerError),
DoCloseIfError(),
DoRetryForAttempts(5, time.Second))
err = Respond(resp,
ByDiscardingBody(),
ByClosing())
```
Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
and then pass the data along, pass the data first and then modify the result, or wrap themselves
around passing the data (such as a logger might do). Decorators run in the order provided. For
example, the following:
```go
req, err := Prepare(&http.Request{},
WithBaseURL("https://microsoft.com/"),
WithPath("a"),
WithPath("b"),
WithPath("c"))
```
will set the URL to:
```
https://microsoft.com/a/b/c
```
Preparers and Responders may be shared and re-used (assuming the underlying decorators support
sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
all bound together by means of input / output channels.
Decorators hold their passed state within a closure (such as the path components in the example
above). Be careful to share Preparers and Responders only in a context where such held state
applies. For example, it may not make sense to share a Preparer that applies a query string from a
fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
struct (e.g., `ByUnmarshallingJson`) is likely incorrect.
Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
See the included examples for more detail. For details on the suggested use of this package by
generated clients, see the Client described below.
## Helpers
### Handling Swagger Dates
The Swagger specification (https://swagger.io) that drives AutoRest
(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
parsing and formatting.
### Handling Empty Values
In JSON, missing values have different semantics than empty values. This is especially true for
services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
only those values to modify. Missing values are to be left unchanged. Developers, then, require a
means to both specify an empty value and to leave the value out of the submitted JSON.
The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
the Go base types encoded through the default JSON package, it is not possible to create JSON to
clear a value at the server.
The workaround within the Go community is to use pointers to base types in lieu of base types within
structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
`*string`. While this enables distinguishing empty values from those to be unchanged, creating
pointers to a base type (notably constant, in-line values) requires additional variables. This, for
example,
```go
s := struct {
S *string
}{ S: &"foo" }
```
fails, while, this
```go
v := "foo"
s := struct {
S *string
}{ S: &v }
```
succeeds.
To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
Go base types which have Swagger analogs. It also provides a helper that converts between
`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
associated with a key should be cleared. With the helpers, the previous example becomes
```go
s := struct {
S *string
}{ S: to.StringPtr("foo") }
```
## Install
```bash
go get github.com/Azure/go-autorest/autorest
go get github.com/Azure/go-autorest/autorest/azure
go get github.com/Azure/go-autorest/autorest/date
go get github.com/Azure/go-autorest/autorest/to
```
### Using with Go Modules
In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
- autorest/adal
- autorest/azure/auth
- autorest/azure/cli
- autorest/date
- autorest/mocks
- autorest/to
- autorest/validation
- autorest
- logger
- tracing
Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
## License
See LICENSE file.
-----
This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.


@@ -222,6 +222,10 @@ func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code
case "code_expired":
return nil, ErrDeviceCodeExpired
default:
// return a more meaningful error message if available
if token.ErrorDescription != nil {
return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription)
}
return nil, ErrDeviceGeneric
}
}
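
The four added lines surface the server's error_description instead of the generic ErrDeviceGeneric when a device-code poll fails. A hedged sketch of the flow this change sits in, assuming adal's sibling device-auth helpers keep the signatures implied by the hunk above (tenant and client IDs are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	sender := &http.Client{} // *http.Client satisfies adal.Sender
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", "<tenant-id>")
	if err != nil {
		log.Fatal(err)
	}
	code, err := adal.InitiateDeviceAuthWithContext(context.Background(), sender, *cfg,
		"<client-id>", "https://management.azure.com/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*code.Message) // tells the user where to enter the code
	// WaitForUserCompletionWithContext polls CheckForUserCompletionWithContext
	// (the function patched above) until the user finishes signing in.
	token, err := adal.WaitForUserCompletionWithContext(context.Background(), sender, code)
	if err != nil {
		log.Fatal(err) // now includes the server's error_description when present
	}
	fmt.Println(len(token.AccessToken) > 0)
}
```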


@@ -3,10 +3,10 @@ module github.com/Azure/go-autorest/autorest/adal
go 1.12
require (
-github.com/Azure/go-autorest/autorest v0.9.0
-github.com/Azure/go-autorest/autorest/date v0.2.0
-github.com/Azure/go-autorest/autorest/mocks v0.3.0
-github.com/Azure/go-autorest/tracing v0.5.0
-github.com/dgrijalva/jwt-go v3.2.0+incompatible
-golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413
+github.com/Azure/go-autorest v14.2.0+incompatible
+github.com/Azure/go-autorest/autorest/date v0.3.0
+github.com/Azure/go-autorest/autorest/mocks v0.4.1
+github.com/Azure/go-autorest/tracing v0.6.0
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0
)


@@ -1,26 +1,17 @@
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
-github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=
-github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=


@@ -16,9 +16,9 @@ package adal
// See the License for the specific language governing permissions and
// limitations under the License.
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest/autorest"
+import _ "github.com/Azure/go-autorest"


@@ -15,11 +15,24 @@ package adal
// limitations under the License.
import (
"crypto/rsa"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"golang.org/x/crypto/pkcs12"
)
var (
// ErrMissingCertificate is returned when no local certificate is found in the provided PFX data.
ErrMissingCertificate = errors.New("adal: certificate missing")
// ErrMissingPrivateKey is returned when no private key is found in the provided PFX data.
ErrMissingPrivateKey = errors.New("adal: private key missing")
)
// LoadToken restores a Token object from a file located at 'path'.
@@ -71,3 +84,52 @@ func SaveToken(path string, mode os.FileMode, token Token) error {
}
return nil
}
// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
// The PFX data must contain a private key along with a certificate whose public key matches that of the
// private key or an error is returned.
// If the private key is not password protected pass the empty string for password.
func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
blocks, err := pkcs12.ToPEM(pfxData, password)
if err != nil {
return nil, nil, err
}
// first extract the private key
var priv *rsa.PrivateKey
for _, block := range blocks {
if block.Type == "PRIVATE KEY" {
priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, nil, err
}
break
}
}
if priv == nil {
return nil, nil, ErrMissingPrivateKey
}
// now find the certificate with the matching public key of our private key
var cert *x509.Certificate
for _, block := range blocks {
if block.Type == "CERTIFICATE" {
pcert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, nil, err
}
certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
if !ok {
// keep looking
continue
}
if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
// found a match
cert = pcert
break
}
}
}
if cert == nil {
return nil, nil, ErrMissingCertificate
}
return cert, priv, nil
}
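
A short usage sketch for the new exported helper, assuming a PFX bundle on disk (the file name is a placeholder; per the doc comment above, pass "" when the private key is not password protected):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	pfxData, err := ioutil.ReadFile("client.pfx") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// Returns the certificate whose public key matches the RSA private key,
	// or ErrMissingPrivateKey / ErrMissingCertificate from the vars above.
	cert, key, err := adal.DecodePfxCertificateData(pfxData, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cert.Subject, key.N.BitLen())
}
```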


@@ -35,7 +35,7 @@ import (
"time"
"github.com/Azure/go-autorest/autorest/date"
"github.com/dgrijalva/jwt-go"
"github.com/form3tech-oss/jwt-go"
)
const (
@@ -62,6 +62,9 @@ const (
// msiEndpoint is the well known endpoint for getting MSI authentication tokens
msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
// the API version to use for the MSI endpoint
msiAPIVersion = "2018-02-01"
// the default number of attempts to refresh an MSI authentication token
defaultMaxMSIRefreshAttempts = 5
@@ -70,6 +73,9 @@ const (
// asMSISecretEnv is the environment variable used to store the request secret on App Service and Functions
asMSISecretEnv = "MSI_SECRET"
// the API version to use for the App Service MSI endpoint
appServiceAPIVersion = "2017-09-01"
)
// OAuthTokenProvider is an interface which should be implemented by an access token retriever
@@ -354,6 +360,7 @@ type ServicePrincipalToken struct {
customRefreshFunc TokenRefresh
refreshCallbacks []TokenRefreshCallback
// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
// Setting this to a value less than 1 will use the default value.
MaxMSIRefreshAttempts int
}
@@ -650,6 +657,8 @@ func GetMSIVMEndpoint() (string, error) {
return msiEndpoint, nil
}
// NOTE: this only indicates if the ASE environment credentials have been set
// which does not necessarily mean that the caller is authenticating via ASE!
func isAppService() bool {
_, asMSIEndpointEnvExists := os.LookupEnv(asMSIEndpointEnv)
_, asMSISecretEnvExists := os.LookupEnv(asMSISecretEnv)
@@ -678,16 +687,22 @@ func GetMSIEndpoint() (string, error) {
// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
// It will use the system assigned identity when creating the token.
func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, nil, callbacks...)
}
// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
-// It will use the specified user assigned identity when creating the token.
+// It will use the clientID of specified user assigned identity when creating the token.
func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
-return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, nil, callbacks...)
}
-func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the azure resource id of user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, &identityResourceID, callbacks...)
+}
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, identityResourceID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
return nil, err
}
@@ -699,6 +714,11 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
return nil, err
}
}
if identityResourceID != nil {
if err := validateStringParam(*identityResourceID, "identityResourceID"); err != nil {
return nil, err
}
}
// We set the oauth config token endpoint to be MSI's endpoint
msiEndpointURL, err := url.Parse(msiEndpoint)
if err != nil {
@@ -709,13 +729,16 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
v.Set("resource", resource)
// App Service MSI currently only supports token API version 2017-09-01
if isAppService() {
v.Set("api-version", "2017-09-01")
v.Set("api-version", appServiceAPIVersion)
} else {
v.Set("api-version", "2018-02-01")
v.Set("api-version", msiAPIVersion)
}
if userAssignedID != nil {
v.Set("client_id", *userAssignedID)
}
if identityResourceID != nil {
v.Set("mi_res_id", *identityResourceID)
}
msiEndpointURL.RawQuery = v.Encode()
spt := &ServicePrincipalToken{
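
The new mi_res_id branch gives callers a third way to select an identity: by ARM resource ID rather than client ID. A hedged sketch of the new constructor (the resource ID is a placeholder; real values only resolve on an Azure VM that has the identity assigned):

```go
package main

import (
	"log"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	msiEndpoint, _ := adal.GetMSIVMEndpoint() // cannot fail; see the getMSIEndpoint note below
	// placeholder ARM resource ID of a user-assigned identity
	resourceID := "/subscriptions/<sub>/resourcegroups/<rg>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/<name>"
	spt, err := adal.NewServicePrincipalTokenFromMSIWithIdentityResourceID(
		msiEndpoint,
		"https://management.azure.com/", // resource to request tokens for
		resourceID,
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = spt // call spt.EnsureFresh() / spt.OAuthToken() on an Azure VM
}
```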
@@ -836,11 +859,28 @@ func (spt *ServicePrincipalToken) getGrantType() string {
}
func isIMDS(u url.URL) bool {
-imds, err := url.Parse(msiEndpoint)
+return isMSIEndpoint(u) == true || isASEEndpoint(u) == true
+}
+func isMSIEndpoint(endpoint url.URL) bool {
+msi, err := url.Parse(msiEndpoint)
if err != nil {
return false
}
-return (u.Host == imds.Host && u.Path == imds.Path) || isAppService()
+return endpoint.Host == msi.Host && endpoint.Path == msi.Path
}
+func isASEEndpoint(endpoint url.URL) bool {
+aseEndpoint, err := GetMSIAppServiceEndpoint()
+if err != nil {
+// app service environment isn't enabled
+return false
+}
+ase, err := url.Parse(aseEndpoint)
+if err != nil {
+return false
+}
+return endpoint.Host == ase.Host && endpoint.Path == ase.Path
+}
func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
@@ -859,7 +899,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
req.Header.Add("User-Agent", UserAgent())
// Add header when runtime is on App Service or Functions
-if isAppService() {
+if isASEEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
asMSISecret, _ := os.LookupEnv(asMSISecretEnv)
req.Header.Add("Secret", asMSISecret)
}
@@ -901,6 +941,14 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
}
var resp *http.Response
if isMSIEndpoint(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = getMSIEndpoint(ctx, spt.sender)
if err != nil {
// return a TokenRefreshError here so that we don't keep retrying
return newTokenRefreshError(fmt.Sprintf("the MSI endpoint is not available. Failed HTTP request to MSI endpoint: %v", err), nil)
}
resp.Body.Close()
}
if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
} else {
@@ -973,6 +1021,11 @@ func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http
attempt := 0
delay := time.Duration(0)
// maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made
if maxAttempts < 1 {
maxAttempts = defaultMaxMSIRefreshAttempts
}
for attempt < maxAttempts {
if resp != nil && resp.Body != nil {
io.Copy(ioutil.Discard, resp.Body)
@@ -1134,3 +1187,12 @@ func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig,
}
return &m, nil
}
// MSIAvailable returns true if the MSI endpoint is available for authentication.
func MSIAvailable(ctx context.Context, sender Sender) bool {
resp, err := getMSIEndpoint(ctx, sender)
if err == nil {
resp.Body.Close()
}
return err == nil
}
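
A quick sketch of the new probe, assuming the default HTTP client (which satisfies adal.Sender, since it has a Do method):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// MSIAvailable hits the IMDS endpoint with the short timeout implemented
	// in getMSIEndpoint (shown below), so this returns quickly off-Azure.
	if adal.MSIAvailable(context.Background(), &http.Client{}) {
		fmt.Println("MSI endpoint reachable; MSI token refresh should work")
	} else {
		fmt.Println("no MSI endpoint; fall back to another credential type")
	}
}
```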


@@ -0,0 +1,36 @@
// +build go1.13
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adal
import (
"context"
"net/http"
"time"
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
// http.NewRequestWithContext() was added in Go 1.13
req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil)
q := req.URL.Query()
q.Add("api-version", msiAPIVersion)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}


@@ -0,0 +1,36 @@
// +build !go1.13
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adal
import (
"context"
"net/http"
"time"
)
func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) {
// this cannot fail, the return sig is due to legacy reasons
msiEndpoint, _ := GetMSIVMEndpoint()
tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
defer cancel()
req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil)
req = req.WithContext(tempCtx)
q := req.URL.Query()
q.Add("api-version", msiAPIVersion)
req.URL.RawQuery = q.Encode()
return sender.Do(req)
}


@@ -2,4 +2,4 @@ module github.com/Azure/go-autorest/autorest/date
go 1.12
-require github.com/Azure/go-autorest/autorest v0.9.0
+require github.com/Azure/go-autorest v14.2.0+incompatible


@@ -1,16 +1,2 @@
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0 h1:Ww5g4zThfD/6cLb4z6xxgeyDa7QDkizMkJKe0ysZXp0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=


@@ -16,9 +16,9 @@ package date
// See the License for the specific language governing permissions and
// limitations under the License.
-// This file, and the github.com/Azure/go-autorest/autorest import, won't actually become part of
+// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "github.com/Azure/go-autorest/autorest"
+import _ "github.com/Azure/go-autorest"

vendor/github.com/Azure/go-autorest/azure-pipelines.yml (generated, vendored, new file)

@@ -0,0 +1,105 @@
variables:
GOPATH: '$(system.defaultWorkingDirectory)/work'
sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)'
jobs:
- job: 'goautorest'
displayName: 'Run go-autorest CI Checks'
strategy:
matrix:
Linux_Go113:
vm.image: 'ubuntu-18.04'
go.version: '1.13'
Linux_Go114:
vm.image: 'ubuntu-18.04'
go.version: '1.14'
pool:
vmImage: '$(vm.image)'
steps:
- task: GoTool@0
inputs:
version: '$(go.version)'
displayName: "Select Go Version"
- script: |
set -e
mkdir -p '$(GOPATH)/bin'
mkdir -p '$(sdkPath)'
shopt -s extglob
mv !(work) '$(sdkPath)'
echo '##vso[task.prependpath]$(GOPATH)/bin'
displayName: 'Create Go Workspace'
- script: |
set -e
curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
dep ensure -v
go install ./vendor/golang.org/x/lint/golint
go get github.com/jstemmer/go-junit-report
go get github.com/axw/gocov/gocov
go get github.com/AlekSi/gocov-xml
go get -u github.com/matm/gocov-html
workingDirectory: '$(sdkPath)'
displayName: 'Install Dependencies'
- script: |
go vet ./autorest/...
go vet ./logger/...
go vet ./tracing/...
workingDirectory: '$(sdkPath)'
displayName: 'Vet'
- script: |
go build -v ./autorest/...
go build -v ./logger/...
go build -v ./tracing/...
workingDirectory: '$(sdkPath)'
displayName: 'Build'
- script: |
set -e
go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml
gocov convert coverage.txt > coverage.json
gocov-xml < coverage.json > coverage.xml
gocov-html < coverage.json > coverage.html
workingDirectory: '$(sdkPath)'
displayName: 'Run Tests'
- script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2
workingDirectory: '$(sdkPath)'
displayName: 'Copyright Header Check'
failOnStderr: true
condition: succeededOrFailed()
- script: |
gofmt -s -l -w ./autorest/. >&2
gofmt -s -l -w ./logger/. >&2
gofmt -s -l -w ./tracing/. >&2
workingDirectory: '$(sdkPath)'
displayName: 'Format Check'
failOnStderr: true
condition: succeededOrFailed()
- script: |
golint ./autorest/... >&2
golint ./logger/... >&2
golint ./tracing/... >&2
workingDirectory: '$(sdkPath)'
displayName: 'Linter Check'
failOnStderr: true
condition: succeededOrFailed()
- task: PublishTestResults@2
inputs:
testRunner: JUnit
testResultsFiles: $(sdkPath)/report.xml
failTaskOnFailedTests: true
- task: PublishCodeCoverageResults@1
inputs:
codeCoverageTool: Cobertura
summaryFileLocation: $(sdkPath)/coverage.xml
additionalCodeCoverageFiles: $(sdkPath)/coverage.html

vendor/github.com/Azure/go-autorest/doc.go (generated, vendored, new file)

@@ -0,0 +1,18 @@
/*
Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages.
*/
package go_autorest
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.


@@ -1,3 +1,5 @@
module github.com/Azure/go-autorest/tracing
go 1.12
require github.com/Azure/go-autorest v14.2.0+incompatible

vendor/github.com/Azure/go-autorest/tracing/go.sum (generated, vendored, new file)

@@ -0,0 +1,2 @@
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=


@@ -0,0 +1,24 @@
// +build modhack
package tracing
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file, and the github.com/Azure/go-autorest import, won't actually become part of
// the resultant binary.
// Necessary for safely adding multi-module repo.
// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
import _ "github.com/Azure/go-autorest"


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,547 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bytes"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"strings"
"sync"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type options struct {
chunkSize int
compressionLevel int
prioritizedFiles []string
}
type Option func(o *options)
// WithChunkSize option specifies the chunk size of eStargz blob to build.
func WithChunkSize(chunkSize int) Option {
return func(o *options) {
o.chunkSize = chunkSize
}
}
// WithCompressionLevel option specifies the gzip compression level.
// The default is gzip.BestCompression.
// See also: https://godoc.org/compress/gzip#pkg-constants
func WithCompressionLevel(level int) Option {
return func(o *options) {
o.compressionLevel = level
}
}
// WithPrioritizedFiles option specifies the list of prioritized files.
// These files must be a complete path relative to "/" (e.g. "foo/bar",
// "./foo/bar")
func WithPrioritizedFiles(files []string) Option {
return func(o *options) {
o.prioritizedFiles = files
}
}
// Blob is an eStargz blob.
type Blob struct {
io.ReadCloser
diffID digest.Digester
tocDigest digest.Digest
}
// DiffID returns the digest of uncompressed blob.
// It is only valid to call DiffID after Close.
func (b *Blob) DiffID() digest.Digest {
return b.diffID.Digest()
}
// TOCDigest returns the digest of uncompressed TOC JSON.
func (b *Blob) TOCDigest() digest.Digest {
return b.tocDigest
}
// Build builds an eStargz blob, which is an extended version of stargz, from the
// tar blob passed as the argument. If prioritized files are listed in the options,
// these files are grouped as "prioritized" and can be used for runtime optimization
// (e.g. prefetch). This function builds the blob in parallel, dividing it into
// several (at least runtime.GOMAXPROCS(0)) sub-blobs.
func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
var opts options
opts.compressionLevel = gzip.BestCompression // BestCompression by default
for _, o := range opt {
o(&opts)
}
layerFiles := newTempFiles()
defer func() {
if rErr != nil {
if err := layerFiles.CleanupAll(); err != nil {
rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
}
}
}()
entries, err := sortEntries(tarBlob, opts.prioritizedFiles)
if err != nil {
return nil, err
}
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
writers := make([]*Writer, len(tarParts))
payloads := make([]*os.File, len(tarParts))
var mu sync.Mutex
var eg errgroup.Group
for i, parts := range tarParts {
i, parts := i, parts
// builds verifiable stargz sub-blobs
eg.Go(func() error {
esgzFile, err := layerFiles.TempFile("", "esgzdata")
if err != nil {
return err
}
sw := NewWriterLevel(esgzFile, opts.compressionLevel)
sw.ChunkSize = opts.chunkSize
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
return err
}
mu.Lock()
writers[i] = sw
payloads[i] = esgzFile
mu.Unlock()
return nil
})
}
if err := eg.Wait(); err != nil {
rErr = err
return nil, err
}
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
if err != nil {
rErr = err
return nil, err
}
var rs []io.Reader
for _, p := range payloads {
fs, err := fileSectionReader(p)
if err != nil {
return nil, err
}
rs = append(rs, fs)
}
diffID := digest.Canonical.Digester()
pr, pw := io.Pipe()
go func() {
r, err := gzip.NewReader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
if err != nil {
pw.CloseWithError(err)
return
}
if _, err := io.Copy(diffID.Hash(), r); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return &Blob{
ReadCloser: readCloser{
Reader: pr,
closeFunc: layerFiles.CleanupAll,
},
tocDigest: tocDgst,
diffID: diffID,
}, nil
}
// closeWithCombine takes unclosed Writers and closes them. It also returns the
// TOC that combines the TOCs of all the Writers.
// The Writers don't write the TOC and footer to their underlying writers, so they
// can be combined into a single eStargz, and the tocAndFooter returned by this
// function can be appended at the tail of that combined blob.
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooter io.Reader, tocDgst digest.Digest, err error) {
if len(ws) == 0 {
return nil, "", fmt.Errorf("at least one writer must be passed")
}
for _, w := range ws {
if w.closed {
return nil, "", fmt.Errorf("writer must be unclosed")
}
defer func(w *Writer) { w.closed = true }(w)
if err := w.closeGz(); err != nil {
return nil, "", err
}
if err := w.bw.Flush(); err != nil {
return nil, "", err
}
}
var (
mtoc = new(jtoc)
currentOffset int64
)
mtoc.Version = ws[0].toc.Version
for _, w := range ws {
for _, e := range w.toc.Entries {
// Recalculate Offset of non-empty files/chunks
if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
e.Offset += currentOffset
}
mtoc.Entries = append(mtoc.Entries, e)
}
if w.toc.Version > mtoc.Version {
mtoc.Version = w.toc.Version
}
currentOffset += w.cw.n
}
tocJSON, err := json.MarshalIndent(mtoc, "", "\t")
if err != nil {
return nil, "", err
}
pr, pw := io.Pipe()
go func() {
zw, _ := gzip.NewWriterLevel(pw, compressionLevel)
tw := tar.NewWriter(zw)
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
pw.CloseWithError(err)
return
}
if _, err := tw.Write(tocJSON); err != nil {
pw.CloseWithError(err)
return
}
if err := tw.Close(); err != nil {
pw.CloseWithError(err)
return
}
if err := zw.Close(); err != nil {
pw.CloseWithError(err)
return
}
pw.Close()
}()
return io.MultiReader(
pr,
bytes.NewReader(footerBytes(currentOffset)),
), digest.FromBytes(tocJSON), nil
}
// divideEntries divides the passed entries into at least the number of parts
// specified by the argument.
func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
var estimatedSize int64
for _, e := range entries {
estimatedSize += e.header.Size
}
unitSize := estimatedSize / int64(minPartsNum)
var (
nextEnd = unitSize
offset int64
)
set = append(set, []*entry{})
for _, e := range entries {
set[len(set)-1] = append(set[len(set)-1], e)
offset += e.header.Size
if offset > nextEnd {
set = append(set, []*entry{})
nextEnd += unitSize
}
}
return
}
// sortEntries reads the specified tar blob and returns a list of tar entries.
// If prioritized files are specified, the list starts with these files,
// keeping the order specified by the argument.
func sortEntries(in io.ReaderAt, prioritized []string) ([]*entry, error) {
// Import tar file.
intar, err := importTar(in)
if err != nil {
return nil, errors.Wrap(err, "failed to sort")
}
// Sort the tar file, respecting the prioritized files list.
sorted := &tarFile{}
for _, l := range prioritized {
moveRec(l, intar, sorted)
}
if len(prioritized) == 0 {
sorted.add(&entry{
header: &tar.Header{
Name: NoPrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
} else {
sorted.add(&entry{
header: &tar.Header{
Name: PrefetchLandmark,
Typeflag: tar.TypeReg,
Size: int64(len([]byte{landmarkContents})),
},
payload: bytes.NewReader([]byte{landmarkContents}),
})
}
// Dump all entries and concatenate them.
return append(sorted.dump(), intar.dump()...), nil
}
// readerFromEntries returns a reader of a tar archive that contains the
// entries passed as arguments.
func readerFromEntries(entries ...*entry) io.Reader {
pr, pw := io.Pipe()
go func() {
tw := tar.NewWriter(pw)
defer tw.Close()
for _, entry := range entries {
if err := tw.WriteHeader(entry.header); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
return
}
if _, err := io.Copy(tw, entry.payload); err != nil {
pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
return
}
}
pw.Close()
}()
return pr
}
func importTar(in io.ReaderAt) (*tarFile, error) {
tf := &tarFile{}
pw, err := newCountReader(in)
if err != nil {
return nil, errors.Wrap(err, "failed to make count reader")
}
tr := tar.NewReader(pw)
// Walk through all nodes.
for {
// Fetch and parse next header.
h, err := tr.Next()
if err != nil {
if err == io.EOF {
break
} else {
return nil, errors.Wrap(err, "failed to parse tar file")
}
}
switch trimNamePrefix(h.Name) {
case PrefetchLandmark, NoPrefetchLandmark:
// Ignore existing landmark
continue
}
// Add the entry if it does not already exist.
if _, ok := tf.get(h.Name); ok {
return nil, fmt.Errorf("Duplicated entry(%q) is not supported", h.Name)
}
tf.add(&entry{
header: h,
payload: io.NewSectionReader(in, pw.currentPos(), h.Size),
})
}
return tf, nil
}
func moveRec(name string, in *tarFile, out *tarFile) {
if name == "" {
return
}
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
moveRec(parent, in, out)
if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
moveRec(e.header.Linkname, in, out)
}
if e, ok := in.get(name); ok {
out.add(e)
in.remove(name)
}
}
type entry struct {
header *tar.Header
payload io.ReadSeeker
}
type tarFile struct {
index map[string]*entry
stream []*entry
}
func (f *tarFile) add(e *entry) {
if f.index == nil {
f.index = make(map[string]*entry)
}
f.index[trimNamePrefix(e.header.Name)] = e
f.stream = append(f.stream, e)
}
func (f *tarFile) remove(name string) {
name = trimNamePrefix(name)
if f.index != nil {
delete(f.index, name)
}
var filtered []*entry
for _, e := range f.stream {
if trimNamePrefix(e.header.Name) == name {
continue
}
filtered = append(filtered, e)
}
f.stream = filtered
}
func (f *tarFile) get(name string) (e *entry, ok bool) {
if f.index == nil {
return nil, false
}
e, ok = f.index[trimNamePrefix(name)]
return
}
func (f *tarFile) dump() []*entry {
return f.stream
}
type readCloser struct {
io.Reader
closeFunc func() error
}
func (rc readCloser) Close() error {
return rc.closeFunc()
}
func fileSectionReader(file *os.File) (*io.SectionReader, error) {
info, err := file.Stat()
if err != nil {
return nil, err
}
return io.NewSectionReader(file, 0, info.Size()), nil
}
func newTempFiles() *tempFiles {
return &tempFiles{}
}
type tempFiles struct {
files []*os.File
filesMu sync.Mutex
}
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
f, err := ioutil.TempFile(dir, pattern)
if err != nil {
return nil, err
}
tf.filesMu.Lock()
tf.files = append(tf.files, f)
tf.filesMu.Unlock()
return f, nil
}
func (tf *tempFiles) CleanupAll() error {
tf.filesMu.Lock()
defer tf.filesMu.Unlock()
var allErr []error
for _, f := range tf.files {
if err := f.Close(); err != nil {
allErr = append(allErr, err)
}
if err := os.Remove(f.Name()); err != nil {
allErr = append(allErr, err)
}
}
tf.files = nil
return errorutil.Aggregate(allErr)
}
func newCountReader(r io.ReaderAt) (*countReader, error) {
pos := int64(0)
return &countReader{r: r, cPos: &pos}, nil
}
type countReader struct {
r io.ReaderAt
cPos *int64
mu sync.Mutex
}
func (cr *countReader) Read(p []byte) (int, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
n, err := cr.r.ReadAt(p, *cr.cPos)
if err == nil {
*cr.cPos += int64(n)
}
return n, err
}
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
cr.mu.Lock()
defer cr.mu.Unlock()
switch whence {
default:
return 0, fmt.Errorf("Unknown whence: %v", whence)
case io.SeekStart:
case io.SeekCurrent:
offset += *cr.cPos
case io.SeekEnd:
return 0, fmt.Errorf("Unsupported whence: %v", whence)
}
if offset < 0 {
return 0, fmt.Errorf("invalid offset")
}
*cr.cPos = offset
return offset, nil
}
func (cr *countReader) currentPos() int64 {
cr.mu.Lock()
defer cr.mu.Unlock()
return *cr.cPos
}
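For orientation, here is a minimal, hedged sketch of driving the Build API above from a separate program. The input path "layer.tar", output path "layer.esgz", and prioritized file "bin/app" are illustrative assumptions, not values taken from this commit.

package main

import (
	"io"
	"log"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	// "layer.tar" is a hypothetical input layer tarball.
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Convert the tar into an eStargz blob; prioritized files are grouped
	// at the front of the blob so a runtime can prefetch them.
	blob, err := estargz.Build(io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithPrioritizedFiles([]string{"bin/app"}))
	if err != nil {
		log.Fatal(err)
	}
	defer blob.Close()

	// Stream the converted blob to disk. DiffID is only valid after the
	// blob has been fully consumed.
	out, err := os.Create("layer.esgz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		log.Fatal(err)
	}
	log.Printf("TOC digest: %s", blob.TOCDigest())
}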

View File

@ -0,0 +1,40 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errorutil
import (
"errors"
"fmt"
"strings"
)
// Aggregate combines a list of errors into a single new error.
func Aggregate(errs []error) error {
switch len(errs) {
case 0:
return nil
case 1:
return errs[0]
default:
points := make([]string, len(errs)+1)
points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
for i, err := range errs {
points[i+1] = fmt.Sprintf("* %s", err)
}
return errors.New(strings.Join(points, "\n\t"))
}
}
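As a quick illustration (a sketch, assuming the vendored import path), Aggregate joins multiple errors into one bulleted message, passes a single error through unchanged, and returns nil for an empty list:

package main

import (
	"errors"
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
)

func main() {
	errs := []error{errors.New("close failed"), errors.New("remove failed")}
	// Prints "2 error(s) occurred:" followed by one "* ..." line per error.
	fmt.Println(errorutil.Aggregate(errs))
}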

View File

@ -0,0 +1,836 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/binary"
"encoding/json"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// A Reader permits random access reads from a stargz file.
type Reader struct {
sr *io.SectionReader
toc *jtoc
tocDigest digest.Digest
// m stores all non-chunk entries, keyed by name.
m map[string]*TOCEntry
// chunks stores all TOCEntry values for regular files that
// are split up. For a file with a single chunk, it's only
// stored in m.
chunks map[string][]*TOCEntry
}
// Open opens a stargz file for reading.
func Open(sr *io.SectionReader) (*Reader, error) {
tocOff, footerSize, err := OpenFooter(sr)
if err != nil {
return nil, errors.Wrapf(err, "error parsing footer")
}
tocTargz := make([]byte, sr.Size()-tocOff-footerSize)
if _, err := sr.ReadAt(tocTargz, tocOff); err != nil {
return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err)
}
zr, err := gzip.NewReader(bytes.NewReader(tocTargz))
if err != nil {
return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
}
zr.Multistream(false)
tr := tar.NewReader(zr)
h, err := tr.Next()
if err != nil {
return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
}
if h.Name != TOCTarName {
return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
}
dgstr := digest.Canonical.Digester()
toc := new(jtoc)
if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
return nil, fmt.Errorf("error decoding TOC JSON: %v", err)
}
r := &Reader{sr: sr, toc: toc, tocDigest: dgstr.Digest()}
if err := r.initFields(); err != nil {
return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
}
return r, nil
}
// OpenFooter extracts and parses footer from the given blob.
func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
}
// TODO: read a bigger chunk (1MB?) at once here to hopefully
// get the TOC + footer in one go.
var footer [FooterSize]byte
if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
return 0, 0, fmt.Errorf("error reading footer: %v", err)
}
return parseFooter(footer[:])
}
// initFields populates the Reader from r.toc after decoding it from
// JSON.
//
// Unexported fields are populated, and TOCEntry fields that were
// implicit in the JSON are filled in.
func (r *Reader) initFields() error {
r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
r.chunks = make(map[string][]*TOCEntry)
var lastPath string
uname := map[int]string{}
gname := map[int]string{}
var lastRegEnt *TOCEntry
for _, ent := range r.toc.Entries {
ent.Name = trimNamePrefix(ent.Name)
if ent.Type == "reg" {
lastRegEnt = ent
}
if ent.Type == "chunk" {
ent.Name = lastPath
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
if ent.ChunkSize == 0 && lastRegEnt != nil {
ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
}
} else {
lastPath = ent.Name
if ent.Uname != "" {
uname[ent.UID] = ent.Uname
} else {
ent.Uname = uname[ent.UID]
}
if ent.Gname != "" {
gname[ent.GID] = ent.Gname
} else {
ent.Gname = gname[ent.GID]
}
ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
if ent.Type == "dir" {
ent.NumLink++ // Parent dir links to this directory
r.m[strings.TrimSuffix(ent.Name, "/")] = ent
} else {
r.m[ent.Name] = ent
}
}
if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
}
if ent.ChunkSize == 0 && ent.Size != 0 {
ent.ChunkSize = ent.Size
}
}
// Populate children, add implicit directories:
for _, ent := range r.toc.Entries {
if ent.Type == "chunk" {
continue
}
// add "foo/":
// add "foo" child to "" (creating "" if necessary)
//
// add "foo/bar/":
// add "bar" child to "foo" (creating "foo" if necessary)
//
// add "foo/bar.txt":
// add "bar.txt" child to "foo" (creating "foo" if necessary)
//
// add "a/b/c/d/e/f.txt":
// create "a/b/c/d/e" node
// add "f.txt" child to "e"
name := ent.Name
if ent.Type == "dir" {
name = strings.TrimSuffix(name, "/")
}
pdir := r.getOrCreateDir(parentDir(name))
ent.NumLink++ // at least one name(ent.Name) references this entry.
if ent.Type == "hardlink" {
if org, ok := r.m[trimNamePrefix(ent.LinkName)]; ok {
org.NumLink++ // original entry is referenced by this ent.Name.
ent = org
} else {
return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
}
}
pdir.addChild(path.Base(name), ent)
}
lastOffset := r.sr.Size()
for i := len(r.toc.Entries) - 1; i >= 0; i-- {
e := r.toc.Entries[i]
if e.isDataType() {
e.nextOffset = lastOffset
}
if e.Offset != 0 {
lastOffset = e.Offset
}
}
return nil
}
func parentDir(p string) string {
dir, _ := path.Split(p)
return strings.TrimSuffix(dir, "/")
}
func (r *Reader) getOrCreateDir(d string) *TOCEntry {
e, ok := r.m[d]
if !ok {
e = &TOCEntry{
Name: d,
Type: "dir",
Mode: 0755,
NumLink: 2, // The directory itself(.) and the parent link to this directory.
}
r.m[d] = e
if d != "" {
pdir := r.getOrCreateDir(parentDir(d))
pdir.addChild(path.Base(d), e)
}
}
return e
}
// VerifyTOC checks that the TOC JSON in the passed blob matches the
// passed digests and that the TOC JSON contains digests for all chunks
// contained in the blob. If the verification succeeds, this function
// returns a TOCEntryVerifier which holds all chunk digests in the stargz blob.
func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
// Verify the digest of TOC JSON
if r.tocDigest != tocDigest {
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
}
digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest
for _, e := range r.toc.Entries {
if e.Type == "reg" || e.Type == "chunk" {
if e.Type == "reg" && e.Size == 0 {
continue // ignores empty file
}
// offset must be unique in stargz blob
if _, ok := digestMap[e.Offset]; ok {
return nil, fmt.Errorf("offset %d found twice", e.Offset)
}
// all chunk entries must contain digest
if e.ChunkDigest == "" {
return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
e.Name, e.Offset)
}
d, err := digest.Parse(e.ChunkDigest)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest)
}
digestMap[e.Offset] = d
}
}
return &verifier{digestMap: digestMap}, nil
}
// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
// offset of the chunk.
type verifier struct {
digestMap map[int64]digest.Digest
digestMapMu sync.Mutex
}
// Verifier returns a content verifier specified by TOCEntry.
func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
v.digestMapMu.Lock()
defer v.digestMapMu.Unlock()
d, ok := v.digestMap[ce.Offset]
if !ok {
return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
ce.Offset, ce.ChunkSize)
}
return d.Verifier(), nil
}
// ChunkEntryForOffset returns the TOCEntry containing the byte of the
// named file at the given offset within the file.
func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
e, ok = r.Lookup(name)
if !ok || !e.isDataType() {
return nil, false
}
ents := r.chunks[name]
if len(ents) < 2 {
if offset >= e.ChunkSize {
return nil, false
}
return e, true
}
i := sort.Search(len(ents), func(i int) bool {
e := ents[i]
return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
})
if i == len(ents) {
return nil, false
}
return ents[i], true
}
// Lookup returns the Table of Contents entry for the given path.
//
// To get the root directory, use the empty string.
func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
if r == nil {
return
}
e, ok = r.m[path]
if ok && e.Type == "hardlink" {
e, ok = r.m[e.LinkName]
}
return
}
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
ent, ok := r.Lookup(name)
if !ok {
// TODO: come up with some error plan. This is lazy:
return nil, &os.PathError{
Path: name,
Op: "OpenFile",
Err: os.ErrNotExist,
}
}
if ent.Type != "reg" {
return nil, &os.PathError{
Path: name,
Op: "OpenFile",
Err: errors.New("not a regular file"),
}
}
fr := &fileReader{
r: r,
size: ent.Size,
ents: r.getChunks(ent),
}
return io.NewSectionReader(fr, 0, fr.size), nil
}
func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
if ents, ok := r.chunks[ent.Name]; ok {
return ents
}
return []*TOCEntry{ent}
}
type fileReader struct {
r *Reader
size int64
ents []*TOCEntry // 1 or more reg/chunk entries
}
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
if off >= fr.size {
return 0, io.EOF
}
if off < 0 {
return 0, errors.New("invalid offset")
}
var i int
if len(fr.ents) > 1 {
i = sort.Search(len(fr.ents), func(i int) bool {
return fr.ents[i].ChunkOffset >= off
})
if i == len(fr.ents) {
i = len(fr.ents) - 1
}
}
ent := fr.ents[i]
if ent.ChunkOffset > off {
if i == 0 {
return 0, errors.New("internal error; first chunk offset is non-zero")
}
ent = fr.ents[i-1]
}
// If ent is a chunk of a large file, adjust the ReadAt
// offset by the chunk's offset.
off -= ent.ChunkOffset
finalEnt := fr.ents[len(fr.ents)-1]
gzOff := ent.Offset
// gzBytesRemain is the number of compressed gzip bytes in this
// file remaining, over 1+ gzip chunks.
gzBytesRemain := finalEnt.NextOffset() - gzOff
sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain)
const maxGZread = 2 << 20
var bufSize = maxGZread
if gzBytesRemain < maxGZread {
bufSize = int(gzBytesRemain)
}
br := bufio.NewReaderSize(sr, bufSize)
if _, err := br.Peek(bufSize); err != nil {
return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
}
gz, err := gzip.NewReader(br)
if err != nil {
return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err)
}
if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil {
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
}
return io.ReadFull(gz, p)
}
// A Writer writes stargz files.
//
// Use NewWriter to create a new Writer.
type Writer struct {
bw *bufio.Writer
cw *countWriter
toc *jtoc
diffHash hash.Hash // SHA-256 of uncompressed tar
closed bool
gz *gzip.Writer
lastUsername map[int]string
lastGroupname map[int]string
compressionLevel int
// ChunkSize optionally controls the maximum number of bytes
// of data of a regular file that can be written in one gzip
// stream before a new gzip stream is started.
// Zero means to use a default, currently 4 MiB.
ChunkSize int
}
// currentGzipWriter writes to the current w.gz field, which can
// change throughout writing a tar entry.
//
// Additionally, it updates w's SHA-256 of the uncompressed bytes
// of the tar file.
type currentGzipWriter struct{ w *Writer }
func (cgw currentGzipWriter) Write(p []byte) (int, error) {
cgw.w.diffHash.Write(p)
return cgw.w.gz.Write(p)
}
func (w *Writer) chunkSize() int {
if w.ChunkSize <= 0 {
return 4 << 20
}
return w.ChunkSize
}
// NewWriter returns a new stargz writer writing to w.
//
// The writer must be closed to write its trailing table of contents.
func NewWriter(w io.Writer) *Writer {
return NewWriterLevel(w, gzip.BestCompression)
}
// NewWriterLevel returns a new stargz writer writing to w.
// The compression level is configurable.
//
// The writer must be closed to write its trailing table of contents.
func NewWriterLevel(w io.Writer, compressionLevel int) *Writer {
bw := bufio.NewWriter(w)
cw := &countWriter{w: bw}
return &Writer{
bw: bw,
cw: cw,
toc: &jtoc{Version: 1},
diffHash: sha256.New(),
compressionLevel: compressionLevel,
}
}
// Close writes the stargz's table of contents and flushes all the
// buffers, returning any error.
func (w *Writer) Close() (digest.Digest, error) {
if w.closed {
return "", nil
}
defer func() { w.closed = true }()
if err := w.closeGz(); err != nil {
return "", err
}
// Write the TOC index.
tocOff := w.cw.n
w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
tw := tar.NewWriter(currentGzipWriter{w})
tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
if err != nil {
return "", err
}
if err := tw.WriteHeader(&tar.Header{
Typeflag: tar.TypeReg,
Name: TOCTarName,
Size: int64(len(tocJSON)),
}); err != nil {
return "", err
}
if _, err := tw.Write(tocJSON); err != nil {
return "", err
}
if err := tw.Close(); err != nil {
return "", err
}
if err := w.closeGz(); err != nil {
return "", err
}
// And a little footer with pointer to the TOC gzip stream.
if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
return "", err
}
if err := w.bw.Flush(); err != nil {
return "", err
}
return digest.FromBytes(tocJSON), nil
}
func (w *Writer) closeGz() error {
if w.closed {
return errors.New("write on closed Writer")
}
if w.gz != nil {
if err := w.gz.Close(); err != nil {
return err
}
w.gz = nil
}
return nil
}
// nameIfChanged returns name, unless it was already the value of (*mp)[id],
// in which case it returns the empty string.
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
if name == "" {
return ""
}
if *mp == nil {
*mp = make(map[int]string)
}
if (*mp)[id] == name {
return ""
}
(*mp)[id] = name
return name
}
func (w *Writer) condOpenGz() {
if w.gz == nil {
w.gz, _ = gzip.NewWriterLevel(w.cw, w.compressionLevel)
}
}
// AppendTar reads the tar or tar.gz file from r and appends
// each of its contents to w.
//
// The input r can optionally be gzip compressed but the output will
// always be gzip compressed.
func (w *Writer) AppendTar(r io.Reader) error {
br := bufio.NewReader(r)
var tr *tar.Reader
if isGzip(br) {
// NewReader can't fail if isGzip returned true.
zr, _ := gzip.NewReader(br)
tr = tar.NewReader(zr)
} else {
tr = tar.NewReader(br)
}
for {
h, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
}
if h.Name == TOCTarName {
// It is possible for a layer to be "stargzified" twice during the
// distribution lifecycle. So we reserve "TOCTarName" here to avoid
// duplicated entries in the resulting layer.
continue
}
xattrs := make(map[string][]byte)
const xattrPAXRecordsPrefix = "SCHILY.xattr."
if h.PAXRecords != nil {
for k, v := range h.PAXRecords {
if strings.HasPrefix(k, xattrPAXRecordsPrefix) {
xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v)
}
}
}
ent := &TOCEntry{
Name: h.Name,
Mode: h.Mode,
UID: h.Uid,
GID: h.Gid,
Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
ModTime3339: formatModtime(h.ModTime),
Xattrs: xattrs,
}
w.condOpenGz()
tw := tar.NewWriter(currentGzipWriter{w})
if err := tw.WriteHeader(h); err != nil {
return err
}
switch h.Typeflag {
case tar.TypeLink:
ent.Type = "hardlink"
ent.LinkName = h.Linkname
case tar.TypeSymlink:
ent.Type = "symlink"
ent.LinkName = h.Linkname
case tar.TypeDir:
ent.Type = "dir"
case tar.TypeReg:
ent.Type = "reg"
ent.Size = h.Size
case tar.TypeChar:
ent.Type = "char"
ent.DevMajor = int(h.Devmajor)
ent.DevMinor = int(h.Devminor)
case tar.TypeBlock:
ent.Type = "block"
ent.DevMajor = int(h.Devmajor)
ent.DevMinor = int(h.Devminor)
case tar.TypeFifo:
ent.Type = "fifo"
default:
return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
}
// We need to keep a reference to the TOC entry for regular files, so that we
// can fill the digest later.
var regFileEntry *TOCEntry
var payloadDigest digest.Digester
if h.Typeflag == tar.TypeReg {
regFileEntry = ent
payloadDigest = digest.Canonical.Digester()
}
if h.Typeflag == tar.TypeReg && ent.Size > 0 {
var written int64
totalSize := ent.Size // save it before we destroy ent
tee := io.TeeReader(tr, payloadDigest.Hash())
for written < totalSize {
if err := w.closeGz(); err != nil {
return err
}
chunkSize := int64(w.chunkSize())
remain := totalSize - written
if remain < chunkSize {
chunkSize = remain
} else {
ent.ChunkSize = chunkSize
}
ent.Offset = w.cw.n
ent.ChunkOffset = written
chunkDigest := digest.Canonical.Digester()
w.condOpenGz()
teeChunk := io.TeeReader(tee, chunkDigest.Hash())
if _, err := io.CopyN(tw, teeChunk, chunkSize); err != nil {
return fmt.Errorf("error copying %q: %v", h.Name, err)
}
ent.ChunkDigest = chunkDigest.Digest().String()
w.toc.Entries = append(w.toc.Entries, ent)
written += chunkSize
ent = &TOCEntry{
Name: h.Name,
Type: "chunk",
}
}
} else {
w.toc.Entries = append(w.toc.Entries, ent)
}
if payloadDigest != nil {
regFileEntry.Digest = payloadDigest.Digest().String()
}
if err := tw.Flush(); err != nil {
return err
}
}
return nil
}
// DiffID returns the SHA-256 of the uncompressed tar bytes.
// It is only valid to call DiffID after Close.
func (w *Writer) DiffID() string {
return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
}
// footerBytes returns the 51-byte footer.
func footerBytes(tocOff int64) []byte {
buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
// Extra header indicating the offset of TOCJSON
// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
header := make([]byte, 4)
header[0], header[1] = 'S', 'G'
subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
gz.Header.Extra = append(header, []byte(subfield)...)
gz.Close()
if buf.Len() != FooterSize {
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
}
return buf.Bytes()
}
func parseFooter(p []byte) (tocOffset int64, footerSize int64, rErr error) {
var allErr []error
tocOffset, err := parseEStargzFooter(p)
if err == nil {
return tocOffset, FooterSize, nil
}
allErr = append(allErr, err)
pad := len(p) - legacyFooterSize
if pad < 0 {
pad = 0
}
tocOffset, err = parseLegacyFooter(p[pad:])
if err == nil {
return tocOffset, legacyFooterSize, nil
}
return 0, 0, errorutil.Aggregate(append(allErr, err))
}
func parseEStargzFooter(p []byte) (tocOffset int64, err error) {
if len(p) != FooterSize {
return 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, err
}
extra := zr.Header.Extra
si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
if si1 != 'S' || si2 != 'G' {
return 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
}
if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
return 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
}
if string(subfield[16:]) != "STARGZ" {
return 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
}
return strconv.ParseInt(string(subfield[:16]), 16, 64)
}
func parseLegacyFooter(p []byte) (tocOffset int64, err error) {
if len(p) != legacyFooterSize {
return 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
}
zr, err := gzip.NewReader(bytes.NewReader(p))
if err != nil {
return 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
}
extra := zr.Header.Extra
if len(extra) != 16+len("STARGZ") {
return 0, fmt.Errorf("legacy: invalid stargz's extra field size")
}
if string(extra[16:]) != "STARGZ" {
return 0, fmt.Errorf("legacy: magic string STARGZ not found")
}
return strconv.ParseInt(string(extra[:16]), 16, 64)
}
func formatModtime(t time.Time) string {
if t.IsZero() || t.Unix() == 0 {
return ""
}
return t.UTC().Round(time.Second).Format(time.RFC3339)
}
func trimNamePrefix(name string) string {
// We don't use filepath.Clean here to preserve "/" suffix for directory entry.
return strings.TrimPrefix(name, "./")
}
// countWriter counts how many bytes have been written to its wrapped
// io.Writer.
type countWriter struct {
w io.Writer
n int64
}
func (cw *countWriter) Write(p []byte) (n int, err error) {
n, err = cw.w.Write(p)
cw.n += int64(n)
return
}
// isGzip reports whether br is positioned right before an upcoming gzip stream.
// It does not consume any bytes from br.
func isGzip(br *bufio.Reader) bool {
const (
gzipID1 = 0x1f
gzipID2 = 0x8b
gzipDeflate = 8
)
peek, _ := br.Peek(3)
return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
}
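To sketch the read path defined above: Open parses the TOC out of the blob's footer, Lookup resolves an entry by path, and OpenFile returns a random-access reader that decompresses gzip chunks on demand. The blob path "layer.esgz" and entry name "bin/app" are hypothetical.

package main

import (
	"io"
	"log"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	// "layer.esgz" is a hypothetical eStargz blob on disk.
	f, err := os.Open("layer.esgz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	// Open parses the footer and TOC; no file payload is read yet.
	r, err := estargz.Open(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		log.Fatal(err)
	}
	if e, ok := r.Lookup("bin/app"); ok { // "bin/app" is a hypothetical entry
		log.Printf("bin/app: %d bytes, mode %v", e.Size, e.Stat().Mode())
	}
	fr, err := r.OpenFile("bin/app")
	if err != nil {
		log.Fatal(err)
	}
	// Random access: only the gzip chunks covering these bytes are read.
	head := make([]byte, 16)
	if _, err := fr.ReadAt(head, 0); err != nil && err != io.EOF {
		log.Fatal(err)
	}
	log.Printf("first bytes: %q", head)
}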

View File

@ -0,0 +1,9 @@
module github.com/containerd/stargz-snapshotter/estargz
go 1.13
require (
github.com/opencontainers/go-digest v1.0.0
github.com/pkg/errors v0.9.1
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)

View File

@ -0,0 +1,6 @@
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

View File

@ -0,0 +1,254 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
package estargz
import (
"os"
"path"
"time"
digest "github.com/opencontainers/go-digest"
)
const (
// TOCTarName is the name of the JSON file in the tar archive in the
// table of contents gzip stream.
TOCTarName = "stargz.index.json"
// FooterSize is the number of bytes in the footer
//
// The footer is an empty gzip stream with no compression and an Extra
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
// number is the offset to the gzip stream of JSON TOC.
//
// 51 comes from:
//
// 10 bytes gzip header
// 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
// 2 bytes Extra: SI1 = 'S', SI2 = 'G'
// 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
// 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
// 5 bytes flate header
// 8 bytes gzip footer
// (End of the eStargz blob)
//
// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz.
FooterSize = 51
// legacyFooterSize is the number of bytes in the legacy stargz footer.
//
// 47 comes from:
//
// 10 byte gzip header +
// 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
// 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
// 5 byte flate header
// 8 byte gzip footer (two little endian uint32s: digest, size)
legacyFooterSize = 47
// TOCJSONDigestAnnotation is an annotation for image manifest. This stores the
// digest of the TOC JSON
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// PrefetchLandmark is a file entry which indicates the end position of
// prefetch in the stargz file.
PrefetchLandmark = ".prefetch.landmark"
// NoPrefetchLandmark is a file entry which indicates that no prefetch should
// occur in the stargz file.
NoPrefetchLandmark = ".no.prefetch.landmark"
landmarkContents = 0xf
)
// jtoc is the JSON-serialized table of contents index of the files in the stargz file.
type jtoc struct {
Version int `json:"version"`
Entries []*TOCEntry `json:"entries"`
}
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
// Name is the tar entry's name. It is the complete path
// stored in the tar file, not just the base name.
Name string `json:"name"`
// Type is one of "dir", "reg", "symlink", "hardlink", "char",
// "block", "fifo", or "chunk".
// The "chunk" type is used for regular file data chunks past the first
// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
// ChunkOffset, and ChunkSize populated.
Type string `json:"type"`
// Size, for regular files, is the logical size of the file.
Size int64 `json:"size,omitempty"`
// ModTime3339 is the modification time of the tar entry. Empty
// means zero or unknown. Otherwise it's in UTC RFC3339
// format. Use the ModTime method to access the time.Time value.
ModTime3339 string `json:"modtime,omitempty"`
modTime time.Time
// LinkName, for symlinks and hardlinks, is the link target.
LinkName string `json:"linkName,omitempty"`
// Mode is the permission and mode bits.
Mode int64 `json:"mode,omitempty"`
// UID is the user ID of the owner.
UID int `json:"uid,omitempty"`
// GID is the group ID of the owner.
GID int `json:"gid,omitempty"`
// Uname is the username of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same UID.
Uname string `json:"userName,omitempty"`
// Gname is the group name of the owner.
//
// In the serialized JSON, this field may only be present for
// the first entry with the same GID.
Gname string `json:"groupName,omitempty"`
// Offset, for regular files, provides the offset in the
// stargz file to the file's data bytes. See ChunkOffset and
// ChunkSize.
Offset int64 `json:"offset,omitempty"`
nextOffset int64 // the Offset of the next entry with a non-zero Offset
// DevMajor is the major device number for "char" and "block" types.
DevMajor int `json:"devMajor,omitempty"`
// DevMinor is the minor device number for "char" and "block" types.
DevMinor int `json:"devMinor,omitempty"`
// NumLink is the number of entry names pointing to this entry.
// Zero means one name references this entry.
NumLink int
// Xattrs are the extended attributes for the entry.
Xattrs map[string][]byte `json:"xattrs,omitempty"`
// Digest stores the OCI checksum for the regular file's payload.
// It has the form "sha256:abcdef01234....".
Digest string `json:"digest,omitempty"`
// ChunkOffset is non-zero if this is a chunk of a large,
// regular file. If so, the Offset is where the gzip header of
// ChunkSize bytes at ChunkOffset in Name begins.
//
// In serialized form, a "chunkSize" JSON field of zero means
// that the chunk goes to the end of the file. After reading
// from the stargz TOC, though, the ChunkSize is initialized
// to a non-zero value when Type is either "reg" or
// "chunk".
ChunkOffset int64 `json:"chunkOffset,omitempty"`
ChunkSize int64 `json:"chunkSize,omitempty"`
// ChunkDigest stores an OCI digest of the chunk. This must be formed
// as "sha256:0123abcd...".
ChunkDigest string `json:"chunkDigest,omitempty"`
children map[string]*TOCEntry
}
// ModTime returns the entry's modification time.
func (e *TOCEntry) ModTime() time.Time { return e.modTime }
// NextOffset returns the position (relative to the start of the
// stargz file) of the next gzip boundary after e.Offset.
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
if e.children == nil {
e.children = make(map[string]*TOCEntry)
}
if child.Type == "dir" {
e.NumLink++ // Entry ".." in the subdirectory links to this directory
}
e.children[baseName] = child
}
// isDataType reports whether TOCEntry is a regular file or chunk (something that
// contains regular file data).
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
// Stat returns a FileInfo value representing e.
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
// ForeachChild calls f for each child item. If f returns false, iteration ends.
// If e is not a directory, f is not called.
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
for name, ent := range e.children {
if !f(name, ent) {
return
}
}
}
// LookupChild returns the directory e's child by its base name.
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
child, ok = e.children[baseName]
return
}
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
type fileInfo struct{ e *TOCEntry }
var _ os.FileInfo = fileInfo{}
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
func (fi fileInfo) Size() int64 { return fi.e.Size }
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
func (fi fileInfo) Sys() interface{} { return fi.e }
func (fi fileInfo) Mode() (m os.FileMode) {
m = os.FileMode(fi.e.Mode) & os.ModePerm
switch fi.e.Type {
case "dir":
m |= os.ModeDir
case "symlink":
m |= os.ModeSymlink
case "char":
m |= os.ModeDevice | os.ModeCharDevice
case "block":
m |= os.ModeDevice
case "fifo":
m |= os.ModeNamedPipe
}
// TODO: ModeSetuid, ModeSetgid, if/as needed.
return m
}
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
// in an eStargz blob.
type TOCEntryVerifier interface {
// Verifier provides a content verifier that can be used for verifying the
// contents of the specified TOCEntry.
Verifier(ce *TOCEntry) (digest.Verifier, error)
}
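Since TOCEntry models a directory tree, the root entry can be walked with Lookup and ForeachChild. A fragment-level sketch, assuming a *estargz.Reader r obtained via Open and the fmt package imported:

// listRoot prints the immediate children of the blob's root directory.
func listRoot(r *estargz.Reader) {
	root, ok := r.Lookup("") // the empty string resolves to the root directory
	if !ok {
		return
	}
	root.ForeachChild(func(name string, ent *estargz.TOCEntry) bool {
		fmt.Printf("%s\t%s\t%v\n", name, ent.Type, ent.Stat().Mode())
		return true // returning false would stop the walk
	})
}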

View File

@ -0,0 +1,27 @@
package challenge
import (
"net/url"
"strings"
)
// FROM: https://golang.org/src/net/http/http.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
// FROM: http://golang.org/src/net/http/transport.go
func canonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}

View File

@ -0,0 +1,237 @@
package challenge
import (
"fmt"
"net/http"
"net/url"
"strings"
"sync"
)
// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
// Scheme is the auth-scheme according to RFC 2617
Scheme string
// Parameters are the auth-params according to RFC 2617
Parameters map[string]string
}
// Manager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type Manager interface {
// GetChallenges returns the challenges for the given
// endpoint URL.
GetChallenges(endpoint url.URL) ([]Challenge, error)
// AddResponse adds the response to the challenge
// manager. The challenges will be parsed out of
// the WWW-Authenticate headers and added to the
// URL which produced the response. If the
// response was authorized, any challenges for the
// endpoint will be cleared.
AddResponse(resp *http.Response) error
}
// NewSimpleManager returns an instance of
// Manager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
Challenges: make(map[string][]Challenge),
}
}
type simpleManager struct {
sync.RWMutex
Challenges map[string][]Challenge
}
func normalizeURL(endpoint *url.URL) {
endpoint.Host = strings.ToLower(endpoint.Host)
endpoint.Host = canonicalAddr(endpoint)
}
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
normalizeURL(&endpoint)
m.RLock()
defer m.RUnlock()
challenges := m.Challenges[endpoint.String()]
return challenges, nil
}
func (m *simpleManager) AddResponse(resp *http.Response) error {
challenges := ResponseChallenges(resp)
if resp.Request == nil {
return fmt.Errorf("missing request reference")
}
urlCopy := url.URL{
Path: resp.Request.URL.Path,
Host: resp.Request.URL.Host,
Scheme: resp.Request.URL.Scheme,
}
normalizeURL(&urlCopy)
m.Lock()
defer m.Unlock()
m.Challenges[urlCopy.String()] = challenges
return nil
}
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge {
if resp.StatusCode == http.StatusUnauthorized {
// Parse the WWW-Authenticate Header and store the challenges
// on this endpoint object.
return parseAuthHeader(resp.Header)
}
return nil
}
func parseAuthHeader(header http.Header) []Challenge {
challenges := []Challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
}
}
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
s = "," + skipSpace(s)
for strings.HasPrefix(s, ",") {
var pkey string
pkey, s = expectToken(skipSpace(s[1:]))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
}
return
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}

View File

@ -1,4 +1,5 @@
.DS_Store
bin
.idea/

View File

@ -5,9 +5,8 @@ script:
- go test -v ./...
go:
- 1.3
- 1.4
- 1.5
- 1.6
- 1.7
- 1.12
- 1.13
- 1.14
- 1.15
- tip

View File

@ -9,7 +9,7 @@ A [go](http://www.golang.org) (or 'golang' for search engine friendliness) imple
**SECURITY NOTICE:** Some older versions of Go have a security issue in crypto/elliptic. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
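
For example, a minimal sketch of that check when an HMAC-signed token is expected (`tokenString` and `hmacSecret` are placeholders you would supply; the package is imported as `jwt`):

```go
token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
	// Reject any token whose alg is not in the HMAC family we expect.
	if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
		return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
	}
	return hmacSecret, nil
})
```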
## What the heck is a JWT?
@ -19,7 +19,7 @@ In short, it's a signed JSON object that does something useful (for example, aut
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own.
## What's in the box?
@ -37,7 +37,7 @@ See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) f
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
## Compliance
@ -93,6 +93,10 @@ Without going too far down the rabbit hole, here's a description of the interact
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
### Troubleshooting
This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).

View File

@ -16,7 +16,7 @@ type Claims interface {
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
Audience string `json:"aud,omitempty"`
Audience []string `json:"aud,omitempty"`
ExpiresAt int64 `json:"exp,omitempty"`
Id string `json:"jti,omitempty"`
IssuedAt int64 `json:"iat,omitempty"`
@ -90,15 +90,17 @@ func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
// ----- helpers
func verifyAud(aud string, cmp string, required bool) bool {
if aud == "" {
func verifyAud(aud []string, cmp string, required bool) bool {
if len(aud) == 0 {
return !required
}
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
return true
} else {
return false
for _, a := range aud {
if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
return true
}
}
return false
}
func verifyExp(exp int64, now int64, required bool) bool {

View File

@ -25,7 +25,9 @@ func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
return nil, err
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
}
}
var pkey *ecdsa.PrivateKey

View File

@ -13,7 +13,15 @@ type MapClaims map[string]interface{}
// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
aud, _ := m["aud"].(string)
var aud []string
switch v := m["aud"].(type) {
case string:
	aud = append(aud, v)
case []string:
	aud = v
case []interface{}:
	// encoding/json decodes JSON arrays into []interface{}, not []string.
	for _, a := range v {
		if s, ok := a.(string); ok {
			aud = append(aud, s)
		}
	}
default:
	return false
}
return verifyAud(aud, cmp, req)
}

View File

@ -12,9 +12,14 @@ import (
type SigningMethodRSAPSS struct {
*SigningMethodRSA
Options *rsa.PSSOptions
// VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
// Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
// https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
// See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
VerifyOptions *rsa.PSSOptions
}
// Specific instances for RS/PS and company
// Specific instances for RS/PS and company.
var (
SigningMethodPS256 *SigningMethodRSAPSS
SigningMethodPS384 *SigningMethodRSAPSS
@ -24,13 +29,15 @@ var (
func init() {
// PS256
SigningMethodPS256 = &SigningMethodRSAPSS{
&SigningMethodRSA{
SigningMethodRSA: &SigningMethodRSA{
Name: "PS256",
Hash: crypto.SHA256,
},
&rsa.PSSOptions{
Options: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
VerifyOptions: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA256,
},
}
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
@ -39,13 +46,15 @@ func init() {
// PS384
SigningMethodPS384 = &SigningMethodRSAPSS{
&SigningMethodRSA{
SigningMethodRSA: &SigningMethodRSA{
Name: "PS384",
Hash: crypto.SHA384,
},
&rsa.PSSOptions{
Options: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
VerifyOptions: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA384,
},
}
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
@ -54,13 +63,15 @@ func init() {
// PS512
SigningMethodPS512 = &SigningMethodRSAPSS{
&SigningMethodRSA{
SigningMethodRSA: &SigningMethodRSA{
Name: "PS512",
Hash: crypto.SHA512,
},
&rsa.PSSOptions{
Options: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
},
VerifyOptions: &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA512,
},
}
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
@ -94,7 +105,12 @@ func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interf
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
opts := m.Options
if m.VerifyOptions != nil {
opts = m.VerifyOptions
}
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
}
// Implements the Sign method from SigningMethod

View File

@ -8,7 +8,7 @@ import (
)
var (
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be a PEM encoded PKCS1 or PKCS8 key")
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
)

View File

@ -15,6 +15,7 @@
package k8schain
import (
"context"
"fmt"
"sync"
@ -49,7 +50,7 @@ var (
// New returns a new authn.Keychain suitable for resolving image references as
// scoped by the provided Options. It speaks to Kubernetes through the provided
// client interface.
func New(client kubernetes.Interface, opt Options) (authn.Keychain, error) {
func New(ctx context.Context, client kubernetes.Interface, opt Options) (authn.Keychain, error) {
if opt.Namespace == "" {
opt.Namespace = "default"
}
@ -68,7 +69,7 @@ func New(client kubernetes.Interface, opt Options) (authn.Keychain, error) {
var pullSecrets []v1.Secret
if client != nil {
for _, name := range opt.ImagePullSecrets {
ps, err := client.CoreV1().Secrets(opt.Namespace).Get(name, metav1.GetOptions{})
ps, err := client.CoreV1().Secrets(opt.Namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -76,12 +77,12 @@ func New(client kubernetes.Interface, opt Options) (authn.Keychain, error) {
}
// Second, fetch all of the pull secrets attached to our service account.
sa, err := client.CoreV1().ServiceAccounts(opt.Namespace).Get(opt.ServiceAccountName, metav1.GetOptions{})
sa, err := client.CoreV1().ServiceAccounts(opt.Namespace).Get(ctx, opt.ServiceAccountName, metav1.GetOptions{})
if err != nil {
return nil, err
}
for _, localObj := range sa.ImagePullSecrets {
ps, err := client.CoreV1().Secrets(opt.Namespace).Get(localObj.Name, metav1.GetOptions{})
ps, err := client.CoreV1().Secrets(opt.Namespace).Get(ctx, localObj.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@ -106,7 +107,7 @@ func New(client kubernetes.Interface, opt Options) (authn.Keychain, error) {
// NewInCluster returns a new authn.Keychain suitable for resolving image references as
// scoped by the provided Options, constructing a kubernetes.Interface based on in-cluster
// authentication.
func NewInCluster(opt Options) (authn.Keychain, error) {
func NewInCluster(ctx context.Context, opt Options) (authn.Keychain, error) {
clusterConfig, err := rest.InClusterConfig()
if err != nil {
return nil, err
@ -116,7 +117,7 @@ func NewInCluster(opt Options) (authn.Keychain, error) {
if err != nil {
return nil, err
}
return New(client, opt)
return New(ctx, client, opt)
}
// NewNoClient returns a new authn.Keychain that supports the portions of the K8s keychain
@ -127,8 +128,8 @@ func NewInCluster(opt Options) (authn.Keychain, error) {
// for Kubernetes authentication, but this actually targets a different use-case. What
// remains is an interesting sweet spot: this variant can serve as a credential provider
// for all of the major public clouds, but in library form (vs. an executable you exec).
func NewNoClient() (authn.Keychain, error) {
return New(nil, Options{})
func NewNoClient(ctx context.Context) (authn.Keychain, error) {
return New(ctx, nil, Options{})
}
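
A minimal sketch of the new context-aware API from inside a cluster; the empty Options{} falls back to the "default" namespace and service account:

package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/authn/k8schain"
)

func main() {
	ctx := context.Background()
	kc, err := k8schain.NewInCluster(ctx, k8schain.Options{})
	if err != nil {
		log.Fatal(err)
	}
	// kc satisfies authn.Keychain and can be handed to remote.* calls,
	// e.g. via remote.WithAuthFromKeychain(kc).
	_ = kc
}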
type lazyProvider struct {

View File

@ -15,12 +15,10 @@
package authn
import (
"encoding/json"
"os"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/types"
"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
)
@ -76,12 +74,6 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
if err != nil {
return nil, err
}
if logs.Enabled(logs.Debug) {
b, err := json.Marshal(cfg)
if err == nil {
logs.Debug.Printf("defaultKeychain.Resolve(%q) = %s", key, string(b))
}
}
empty := types.AuthConfig{}
if cfg == empty {

View File

@ -0,0 +1,35 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redact contains a simple context signal for redacting requests.
package redact
import (
"context"
)
type contextKey string
var redactKey = contextKey("redact")
// NewContext creates a new ctx with the reason for redaction.
func NewContext(ctx context.Context, reason string) context.Context {
return context.WithValue(ctx, redactKey, reason)
}
// FromContext returns the redaction reason, if any.
func FromContext(ctx context.Context) (bool, string) {
reason, ok := ctx.Value(redactKey).(string)
return ok, reason
}
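
Since this package is internal, the sketch below is illustrative only; the reason string is arbitrary:

ctx := redact.NewContext(context.Background(), "omitting binary blobs from logs")
if ok, reason := redact.FromContext(ctx); ok {
	// A logging transport can consult this before dumping a request body.
	fmt.Println("redacting because:", reason)
}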

View File

@ -14,13 +14,29 @@
package name
const (
// DefaultRegistry is the registry name that will be used if no registry
// provided and the default is not overridden.
DefaultRegistry = "index.docker.io"
defaultRegistryAlias = "docker.io"
// DefaultTag is the tag name that will be used if no tag provided and the
// default is not overridden.
DefaultTag = "latest"
)
type options struct {
strict bool // weak by default
insecure bool // secure by default
defaultRegistry string
defaultTag string
}
func makeOptions(opts ...Option) options {
opt := options{}
opt := options{
defaultRegistry: DefaultRegistry,
defaultTag: DefaultTag,
}
for _, o := range opts {
o(&opt)
}
@ -47,3 +63,21 @@ func WeakValidation(opts *options) {
func Insecure(opts *options) {
opts.insecure = true
}
// OptionFn is a function that returns an option.
type OptionFn func() Option
// WithDefaultRegistry sets the default registry that will be used if one is not
// provided.
func WithDefaultRegistry(r string) Option {
return func(opts *options) {
opts.defaultRegistry = r
}
}
// WithDefaultTag sets the default tag that will be used if one is not provided.
func WithDefaultTag(t string) Option {
return func(opts *options) {
opts.defaultTag = t
}
}
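
A sketch of how these options might be used; the registry and tag here are placeholders, and the defaults only take effect when the parsed input omits those components:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	ref, err := name.ParseReference("ubuntu",
		name.WithDefaultRegistry("registry.example.com"),
		name.WithDefaultTag("mytag"),
	)
	if err != nil {
		log.Fatal(err)
	}
	// With no registry or tag in the input, both defaults should apply.
	fmt.Println(ref.Name())
}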

View File

@ -21,12 +21,6 @@ import (
"strings"
)
const (
// DefaultRegistry is Docker Hub, assumed when a hostname is omitted.
DefaultRegistry = "index.docker.io"
defaultRegistryAlias = "docker.io"
)
// Detect more complex forms of local references.
var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)
@ -44,10 +38,7 @@ type Registry struct {
// RegistryStr returns the registry component of the Registry.
func (r Registry) RegistryStr() string {
if r.registry != "" {
return r.registry
}
return DefaultRegistry
return r.registry
}
// Name returns the name from which the Registry was derived.
@ -124,6 +115,9 @@ func NewRegistry(name string, opts ...Option) (Registry, error) {
return Registry{}, err
}
if name == "" {
name = opt.defaultRegistry
}
// Rewrite "docker.io" to "index.docker.io".
// See: https://github.com/google/go-containerregistry/issues/68
if name == defaultRegistryAlias {

View File

@ -19,7 +19,6 @@ import (
)
const (
defaultTag = "latest"
// TODO(dekkagaijin): use the docker/distribution regexes for validation.
tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
tagDelim = ":"
@ -47,10 +46,7 @@ func (t Tag) Identifier() string {
// TagStr returns the tag component of the Tag.
func (t Tag) TagStr() string {
if t.tag != "" {
return t.tag
}
return defaultTag
return t.tag
}
// Name returns the name from which the Tag was derived.
@ -69,7 +65,7 @@ func (t Tag) Scope(action string) string {
}
func checkTag(name string) error {
return checkElement("tag", name, tagChars, 1, 127)
return checkElement("tag", name, tagChars, 1, 128)
}
// NewTag returns a new Tag representing the given name, according to the given strictness.
@ -96,6 +92,10 @@ func NewTag(name string, opts ...Option) (Tag, error) {
}
}
if tag == "" {
tag = opt.defaultTag
}
repo, err := NewRepository(base, opts...)
if err != nil {
return Tag{}, err

View File

@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
// +k8s:deepcopy-gen=package
// Package v1 defines structured types for OCI v1 images

View File

@ -19,7 +19,6 @@ import (
)
// Image defines the interface for interacting with an OCI v1 image.
//go:generate counterfeiter -o fake/image.go . Image
type Image interface {
// Layers returns the ordered collection of filesystem layers that comprise this image.
// The order of the list is oldest/base layer first, and most-recent/top layer last.

View File

@ -19,7 +19,6 @@ import (
)
// ImageIndex defines the interface for interacting with an OCI image index.
//go:generate counterfeiter -o fake/index.go . ImageIndex
type ImageIndex interface {
// MediaType of this image's manifest.
MediaType() (types.MediaType, error)

View File

@ -1,4 +1,4 @@
// Copyright 2018 Google LLC All Rights Reserved.
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,36 +12,36 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package v1util
package and
import (
"io"
)
// readAndCloser implements io.ReadCloser by reading from a particular io.Reader
// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
// and then calling the provided "Close()" method.
type readAndCloser struct {
type ReadCloser struct {
io.Reader
CloseFunc func() error
}
var _ io.ReadCloser = (*readAndCloser)(nil)
var _ io.ReadCloser = (*ReadCloser)(nil)
// Close implements io.ReadCloser
func (rac *readAndCloser) Close() error {
func (rac *ReadCloser) Close() error {
return rac.CloseFunc()
}
// writeAndCloser implements io.WriteCloser by reading from a particular io.Writer
// WriteCloser implements io.WriteCloser by writing to a particular io.Writer
// and then calling the provided "Close()" method.
type writeAndCloser struct {
type WriteCloser struct {
io.Writer
CloseFunc func() error
}
var _ io.WriteCloser = (*writeAndCloser)(nil)
var _ io.WriteCloser = (*WriteCloser)(nil)
// Close implements io.WriteCloser
func (wac *writeAndCloser) Close() error {
func (wac *WriteCloser) Close() error {
return wac.CloseFunc()
}

View File

@ -0,0 +1,54 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package estargz
import (
"bytes"
"io"
"io/ioutil"
"github.com/containerd/stargz-snapshotter/estargz"
v1 "github.com/google/go-containerregistry/pkg/v1"
)
// Assert that what we're returning is an io.ReadCloser
var _ io.ReadCloser = (*estargz.Blob)(nil)
// ReadCloser reads uncompressed tarball input from the io.ReadCloser and
// returns:
// * An io.ReadCloser from which compressed data may be read, and
// * A v1.Hash with the hash of the estargz table of contents, or
// * An error if the estargz processing encountered a problem.
//
// Refer to estargz for the options:
// https://pkg.go.dev/github.com/containerd/stargz-snapshotter@v0.2.0/estargz#Option
func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) {
defer r.Close()
// TODO(#876): Avoid buffering into memory.
bs, err := ioutil.ReadAll(r)
if err != nil {
return nil, v1.Hash{}, err
}
br := bytes.NewReader(bs)
rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...)
if err != nil {
return nil, v1.Hash{}, err
}
h, err := v1.NewHash(rc.TOCDigest().String())
return rc, h, err
}
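
Illustrative only (this wrapper appears to live in an internal package): converting an uncompressed layer tarball into an eStargz blob plus its TOC digest. The file path is a placeholder:

f, err := os.Open("layer.tar")
if err != nil {
	log.Fatal(err)
}
blob, toc, err := estargz.ReadCloser(f) // ReadCloser closes f for us
if err != nil {
	log.Fatal(err)
}
defer blob.Close()
// blob yields the compressed bytes; toc identifies the table of contents.
fmt.Println("eStargz TOC digest:", toc)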

View File

@ -0,0 +1,96 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gzip
import (
"bytes"
"compress/gzip"
"io"
"github.com/google/go-containerregistry/pkg/v1/internal/and"
)
var gzipMagicHeader = []byte{'\x1f', '\x8b'}
// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses gzip.BestSpeed for the compression level.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
return ReadCloserLevel(r, gzip.BestSpeed)
}
// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// Refer to compress/gzip for the level:
// https://golang.org/pkg/compress/gzip/#pkg-constants
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
pr, pw := io.Pipe()
// Returns err so we can pw.CloseWithError(err)
go func() error {
// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
// Context: https://golang.org/issue/24283
gw, err := gzip.NewWriterLevel(pw, level)
if err != nil {
return pw.CloseWithError(err)
}
if _, err := io.Copy(gw, r); err != nil {
defer r.Close()
defer gw.Close()
return pw.CloseWithError(err)
}
defer pw.Close()
defer r.Close()
defer gw.Close()
return nil
}()
return pr
}
// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
gr, err := gzip.NewReader(r)
if err != nil {
return nil, err
}
return &and.ReadCloser{
Reader: gr,
CloseFunc: func() error {
// If the unzip fails, then this seems to return the same
// error as the read. We don't want this to interfere with
// us closing the main ReadCloser, since this could leave
// an open file descriptor (fails on Windows).
gr.Close()
return r.Close()
},
}, nil
}
// Is detects whether the input stream is compressed. Note that it consumes
// up to two bytes from the reader in the process.
func Is(r io.Reader) (bool, error) {
	magicHeader := make([]byte, 2)
	// io.ReadFull tolerates slow readers that return fewer than 2 bytes per Read.
	n, err := io.ReadFull(r, magicHeader)
	if n == 0 && err == io.EOF {
		return false, nil
	}
	if err != nil && err != io.ErrUnexpectedEOF {
		return false, err
	}
	return bytes.Equal(magicHeader, gzipMagicHeader), nil
}
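
A round-trip sketch of these helpers (illustrative only, since the package is internal): compress a small string, then read it back out through the unzipping wrapper:

zr := gzip.ReadCloser(ioutil.NopCloser(strings.NewReader("hello")))
ur, err := gzip.UnzipReadCloser(zr)
if err != nil {
	log.Fatal(err)
}
defer ur.Close()
b, err := ioutil.ReadAll(ur)
if err != nil {
	log.Fatal(err)
}
fmt.Println(string(b)) // "hello"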

View File

@ -0,0 +1,62 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"encoding/hex"
"fmt"
"hash"
"io"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/and"
)
type verifyReader struct {
inner io.Reader
hasher hash.Hash
expected v1.Hash
}
// Read implements io.Reader
func (vc *verifyReader) Read(b []byte) (int, error) {
n, err := vc.inner.Read(b)
if err == io.EOF {
got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size())))
if want := vc.expected.Hex; got != want {
return n, fmt.Errorf("error verifying %s checksum; got %q, want %q",
vc.expected.Algorithm, got, want)
}
}
return n, err
}
// ReadCloser wraps the given io.ReadCloser to verify that its contents match
// the provided v1.Hash before io.EOF is returned.
func ReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) {
w, err := v1.Hasher(h.Algorithm)
if err != nil {
return nil, err
}
r2 := io.TeeReader(r, w)
return &and.ReadCloser{
Reader: &verifyReader{
inner: r2,
hasher: w,
expected: h,
},
CloseFunc: r.Close,
}, nil
}
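
Illustrative only (internal package): a round trip that hashes some bytes with v1.SHA256, then re-reads them through the verifying wrapper, which fails at EOF on any mismatch:

data := "hello"
h, _, err := v1.SHA256(strings.NewReader(data))
if err != nil {
	log.Fatal(err)
}
vr, err := verify.ReadCloser(ioutil.NopCloser(strings.NewReader(data)), h)
if err != nil {
	log.Fatal(err)
}
defer vr.Close()
if _, err := ioutil.ReadAll(vr); err != nil {
	log.Fatal(err) // fires at EOF if the contents don't hash to h
}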

View File

@ -34,7 +34,7 @@ var layoutFile = `{
// AppendImage writes a v1.Image to the Path and updates
// the index.json to reference it.
func (l Path) AppendImage(img v1.Image, options ...Option) error {
if err := l.writeImage(img); err != nil {
if err := l.WriteImage(img); err != nil {
return err
}
@ -71,7 +71,7 @@ func (l Path) AppendImage(img v1.Image, options ...Option) error {
// AppendIndex writes a v1.ImageIndex to the Path and updates
// the index.json to reference it.
func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error {
if err := l.writeIndex(ii); err != nil {
if err := l.WriteIndex(ii); err != nil {
return err
}
@ -182,7 +182,13 @@ func (l Path) writeLayer(layer v1.Layer) error {
return l.WriteBlob(d, r)
}
func (l Path) writeImage(img v1.Image) error {
// WriteImage writes an image, including its manifest, config and all of its
// layers, to the blobs directory. If any blob already exists, as determined by
// its hash filename, it is not written again.
// This function does *not* update the `index.json` file. If you want to write the
// image and also update the `index.json`, call AppendImage(), which wraps this
// and also updates the `index.json`.
func (l Path) WriteImage(img v1.Image) error {
layers, err := img.Layers()
if err != nil {
return err
@ -241,7 +247,7 @@ func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
if err != nil {
return err
}
if err := l.writeIndex(ii); err != nil {
if err := l.WriteIndex(ii); err != nil {
return err
}
case types.OCIManifestSchema1, types.DockerManifestSchema2:
@ -249,7 +255,7 @@ func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
if err != nil {
return err
}
if err := l.writeImage(img); err != nil {
if err := l.WriteImage(img); err != nil {
return err
}
default:
@ -266,7 +272,14 @@ func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
return l.WriteFile(indexFile, rawIndex, os.ModePerm)
}
func (l Path) writeIndex(ii v1.ImageIndex) error {
// WriteIndex writes an index to the blobs directory. It walks down the tree of
// child manifests and/or indexes until every config and every layer has been
// written. If any blob already exists, as determined by its hash filename, it
// is not written again.
// This function does *not* update the `index.json` file. If you want to write the
// index and also update the `index.json`, call AppendIndex(), which wraps this
// and also updates the `index.json`.
func (l Path) WriteIndex(ii v1.ImageIndex) error {
// Always just write oci-layout file, since it's small.
if err := l.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil {
return err
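
To illustrate the newly exported entry points, a sketch that creates an OCI layout and appends a fabricated image to it; the path is a placeholder and random.Image supplies a tiny synthetic image:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/layout"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	p, err := layout.Write("./oci-out", empty.Index)
	if err != nil {
		log.Fatal(err)
	}
	img, err := random.Image(1024, 1)
	if err != nil {
		log.Fatal(err)
	}
	// WriteImage would store only the blobs; AppendImage also updates index.json.
	if err := p.AppendImage(img); err != nil {
		log.Fatal(err)
	}
}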

View File

@ -0,0 +1,90 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package match provides functionality for conveniently matching a v1.Descriptor.
package match
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Matcher is a function that is given a v1.Descriptor and returns whether or
// not it matches a given rule. It can match on anything it wants in the Descriptor.
type Matcher func(desc v1.Descriptor) bool
// Name returns a match.Matcher that matches based on the value of the
// "org.opencontainers.image.ref.name" annotation:
// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys
func Name(name string) Matcher {
return Annotation(imagespec.AnnotationRefName, name)
}
// Annotation returns a match.Matcher that matches based on the provided annotation.
func Annotation(key, value string) Matcher {
return func(desc v1.Descriptor) bool {
if desc.Annotations == nil {
return false
}
if aValue, ok := desc.Annotations[key]; ok && aValue == value {
return true
}
return false
}
}
// Platforms returns a match.Matcher that matches on any one of the provided platforms.
// Ignores any descriptors that do not have a platform.
func Platforms(platforms ...v1.Platform) Matcher {
return func(desc v1.Descriptor) bool {
if desc.Platform == nil {
return false
}
for _, platform := range platforms {
if desc.Platform.Equals(platform) {
return true
}
}
return false
}
}
// MediaTypes returns a match.Matcher that matches at least one of the provided media types.
func MediaTypes(mediaTypes ...string) Matcher {
mts := map[string]bool{}
for _, media := range mediaTypes {
mts[media] = true
}
return func(desc v1.Descriptor) bool {
if desc.MediaType == "" {
return false
}
if _, ok := mts[string(desc.MediaType)]; ok {
return true
}
return false
}
}
// Digests returns a match.Matcher that matches at least one of the provided Digests
func Digests(digests ...v1.Hash) Matcher {
digs := map[v1.Hash]bool{}
for _, digest := range digests {
digs[digest] = true
}
return func(desc v1.Descriptor) bool {
_, ok := digs[desc.Digest]
return ok
}
}
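
For example, a platform matcher can be built and applied to any descriptor; in practice the descriptor would come from an index manifest rather than being constructed by hand:

package main

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
)

func main() {
	m := match.Platforms(v1.Platform{OS: "linux", Architecture: "amd64"})
	desc := v1.Descriptor{Platform: &v1.Platform{OS: "linux", Architecture: "amd64"}}
	fmt.Println(m(desc)) // true
}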

View File

@ -105,6 +105,10 @@ func (i *image) compute() error {
desc.URLs = add.URLs
}
if add.MediaType != "" {
desc.MediaType = add.MediaType
}
manifestLayers = append(manifestLayers, *desc)
digestMap[desc.Digest] = add.Layer
}

View File

@ -26,9 +26,9 @@ import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
const whiteoutPrefix = ".wh."
@ -40,6 +40,7 @@ type Addendum struct {
History v1.History
URLs []string
Annotations map[string]string
MediaType types.MediaType
}
// AppendLayers applies layers to a base image.
@ -174,6 +175,7 @@ func extract(img v1.Image, w io.Writer) error {
if err != nil {
return fmt.Errorf("reading layer contents: %v", err)
}
defer layerReader.Close()
tarReader := tar.NewReader(layerReader)
for {
header, err := tarReader.Next()
@ -299,6 +301,7 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
if err != nil {
return nil, fmt.Errorf("getting layer: %v", err)
}
defer layerReader.Close()
w := new(bytes.Buffer)
tarWriter := tar.NewWriter(w)
defer tarWriter.Close()
@ -332,7 +335,7 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
b := w.Bytes()
// gzip the contents, then create the layer
opener := func() (io.ReadCloser, error) {
return v1util.GzipReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil
return gzip.ReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil
}
layer, err = tarball.LayerFromOpener(opener)
if err != nil {

View File

@ -60,23 +60,42 @@ func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) {
return nil, fmt.Errorf("failed to get config for original: %v", err)
}
newConfig, err := newBase.ConfigFile()
if err != nil {
return nil, fmt.Errorf("could not get config for new base: %v", err)
}
// Stitch together an image that contains:
// - original image's config
// - new base image's os/arch properties
// - new base image's layers + top of original image's layers
// - new base image's history + top of original image's history
rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy())
if err != nil {
return nil, fmt.Errorf("failed to create empty image with original config: %v", err)
}
// Add new config properties from existing images.
rebasedConfig, err := rebasedImage.ConfigFile()
if err != nil {
return nil, fmt.Errorf("could not get config for rebased image: %v", err)
}
// OS/Arch properties from new base
rebasedConfig.Architecture = newConfig.Architecture
rebasedConfig.OS = newConfig.OS
rebasedConfig.OSVersion = newConfig.OSVersion
// Apply config properties to rebased.
rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig)
if err != nil {
return nil, fmt.Errorf("failed to replace config for rebased image: %v", err)
}
// Get new base layers and config for history.
newBaseLayers, err := newBase.Layers()
if err != nil {
return nil, fmt.Errorf("could not get new base layers for new base: %v", err)
}
newConfig, err := newBase.ConfigFile()
if err != nil {
return nil, fmt.Errorf("could not get config for new base: %v", err)
}
// Add new base layers.
rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...)
if err != nil {

View File

@ -51,7 +51,7 @@ There are some properties of a [`Descriptor`](https://github.com/opencontainers/
For example, in a `tarball.Image`, there is a `LayerSources` field that contains
an entire layer descriptor with `URLs` information for foreign layers. This
information can be passed through to callers by implementing this optional
`Descriptor` method.
See [`#654`](https://github.com/google/go-containerregistry/pull/654).

View File

@ -18,8 +18,8 @@ import (
"io"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
// CompressedLayer represents the bare minimum interface a natively
@ -49,7 +49,7 @@ func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
if err != nil {
return nil, err
}
return v1util.GunzipReadCloser(r)
return gzip.UnzipReadCloser(r)
}
// DiffID implements v1.Layer

View File

@ -0,0 +1,85 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package partial
import (
"fmt"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/match"
)
// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher.
func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
// get the actual manifest list
indexManifest, err := index.IndexManifest()
if err != nil {
return nil, fmt.Errorf("unable to get raw index: %v", err)
}
manifests := []v1.Descriptor{}
// try to get the root of our image
for _, manifest := range indexManifest.Manifests {
if matcher(manifest) {
manifests = append(manifests, manifest)
}
}
return manifests, nil
}
// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor
// matches the provided Matcher, but the referenced item is not an Image, it is ignored.
// Only returns those that match the Matcher and are images.
func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
matches := []v1.Image{}
manifests, err := FindManifests(index, matcher)
if err != nil {
return nil, err
}
for _, desc := range manifests {
// if it is not an image, ignore it
if !desc.MediaType.IsImage() {
continue
}
img, err := index.Image(desc.Digest)
if err != nil {
return nil, err
}
matches = append(matches, img)
}
return matches, nil
}
// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor
// matches the provided Matcher, but the referenced item is not an Index, it is ignored.
// Only returns those that match the Matcher and are indexes.
func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
matches := []v1.ImageIndex{}
manifests, err := FindManifests(index, matcher)
if err != nil {
return nil, err
}
for _, desc := range manifests {
if !desc.MediaType.IsIndex() {
continue
}
// if it is not an index, ignore it
idx, err := index.ImageIndex(desc.Digest)
if err != nil {
return nil, err
}
matches = append(matches, idx)
}
return matches, nil
}
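
A sketch of filtering an index down to its images; the index here is synthetic, and the everything-matcher stands in for something like match.Platforms or match.Digests:

package main

import (
	"fmt"
	"log"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	idx, err := random.Index(1024, 1, 3) // synthetic index with 3 images
	if err != nil {
		log.Fatal(err)
	}
	// Any func(v1.Descriptor) bool satisfies match.Matcher.
	all := func(desc v1.Descriptor) bool { return true }
	imgs, err := partial.FindImages(idx, all)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("images found:", len(imgs))
}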

View File

@ -20,8 +20,8 @@ import (
"sync"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/gzip"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
// UncompressedLayer represents the bare minimum interface a natively
@ -54,7 +54,7 @@ func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) {
if err != nil {
return nil, err
}
return v1util.GzipReadCloser(u), nil
return gzip.ReadCloser(u), nil
}
// Digest implements v1.Layer

View File

@ -14,6 +14,10 @@
package v1
import (
"sort"
)
// Platform represents the target os/arch for an image.
type Platform struct {
Architecture string `json:"architecture"`
@ -23,3 +27,33 @@ type Platform struct {
Variant string `json:"variant,omitempty"`
Features []string `json:"features,omitempty"`
}
// Equals returns true if the given platform is semantically equivalent to this one.
// The order of Features and OSFeatures is not important.
func (p Platform) Equals(o Platform) bool {
return p.OS == o.OS && p.Architecture == o.Architecture && p.Variant == o.Variant && p.OSVersion == o.OSVersion &&
stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && stringSliceEqualIgnoreOrder(p.Features, o.Features)
}
// stringSliceEqual compares 2 string slices and returns whether their contents are identical.
func stringSliceEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i, elm := range a {
if elm != b[i] {
return false
}
}
return true
}
// stringSliceEqualIgnoreOrder compares 2 string slices and returns whether their contents are identical, ignoring order.
func stringSliceEqualIgnoreOrder(a, b []string) bool {
	// Sort copies so the caller's slices are not reordered as a side effect
	// (a[:] would alias the original backing array).
	a1 := append([]string(nil), a...)
	b1 := append([]string(nil), b...)
	sort.Strings(a1)
	sort.Strings(b1)
	return stringSliceEqual(a1, b1)
}

View File

@ -17,6 +17,7 @@ package v1
// Update representation of an update of transfer progress. Some functions
// in this module can take a channel to which updates will be sent while a
// transfer is in progress.
// +k8s:deepcopy-gen=false
type Update struct {
Total int64
Complete int64

View File

@ -71,7 +71,7 @@ func Image(byteSize, layers int64) (v1.Image, error) {
Author: "random.Image",
Comment: fmt.Sprintf("this is a random history %d of %d", i, layers),
CreatedBy: "random",
Created: v1.Time{time.Now()},
Created: v1.Time{Time: time.Now()},
},
})
}

View File

@ -8,7 +8,7 @@ per the [OCI distribution spec](https://github.com/opencontainers/distribution-s
It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the
authentication handshake and structured errors.
## Usage
```go
package main

View File

@ -37,7 +37,7 @@ func CatalogPage(target name.Registry, last string, n int, options ...Option) ([
}
scopes := []string{target.Scope(transport.PullScope)}
tr, err := transport.New(target, o.auth, o.transport, scopes)
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
if err != nil {
return nil, err
}
@ -82,7 +82,7 @@ func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]st
}
scopes := []string{target.Scope(transport.PullScope)}
tr, err := transport.New(target, o.auth, o.transport, scopes)
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
if err != nil {
return nil, err
}

View File

@ -30,7 +30,7 @@ func Delete(ref name.Reference, options ...Option) error {
return err
}
scopes := []string{ref.Scope(transport.DeleteScope)}
tr, err := transport.New(ref.Context().Registry, o.auth, o.transport, scopes)
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
if err != nil {
return err
}

View File

@ -22,22 +22,18 @@ import (
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/verify"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
var defaultPlatform = v1.Platform{
Architecture: "amd64",
OS: "linux",
}
// ErrSchema1 indicates that we received a schema1 manifest from the registry.
// This library doesn't have plans to support this legacy image format:
// https://github.com/google/go-containerregistry/issues/377
@ -76,6 +72,8 @@ func (d *Descriptor) RawManifest() ([]byte, error) {
// Get returns a remote.Descriptor for the given reference. The response from
// the registry is left un-interpreted, for the most part. This is useful for
// querying what kind of artifact a reference represents.
//
// See Head if you don't need the response body.
func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
acceptable := []types.MediaType{
// Just to look at them.
@ -87,6 +85,33 @@ func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
return get(ref, acceptable, options...)
}
// Head returns a v1.Descriptor for the given reference by issuing a HEAD
// request.
//
// Note that the server response will not have a body, so any errors encountered
// should be retried with Get to get more details.
func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
acceptable := []types.MediaType{
// Just to look at them.
types.DockerManifestSchema1,
types.DockerManifestSchema1Signed,
}
acceptable = append(acceptable, acceptableImageMediaTypes...)
acceptable = append(acceptable, acceptableIndexMediaTypes...)
o, err := makeOptions(ref.Context(), options...)
if err != nil {
return nil, err
}
f, err := makeFetcher(ref, o)
if err != nil {
return nil, err
}
return f.headManifest(ref, acceptable)
}
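
A sketch contrasting Head with Get; the reference is a placeholder, and since Head fetches no manifest body, only the descriptor fields are available:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/google-containers/pause:3.2")
	if err != nil {
		log.Fatal(err)
	}
	desc, err := remote.Head(ref) // HEAD request only; no body to parse
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(desc.Digest, desc.MediaType, desc.Size)
}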
// Handle options and fetch the manifest with the acceptable MediaTypes in the
// Accept header.
func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
@ -193,7 +218,7 @@ type fetcher struct {
}
func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
tr, err := transport.New(ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
if err != nil {
return nil, err
}
@ -277,14 +302,63 @@ func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType
return manifest, &desc, nil
}
func (f *fetcher) fetchBlob(h v1.Hash) (io.ReadCloser, error) {
func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
u := f.url("manifests", ref.Identifier())
req, err := http.NewRequest(http.MethodHead, u.String(), nil)
if err != nil {
return nil, err
}
accept := []string{}
for _, mt := range acceptable {
accept = append(accept, string(mt))
}
req.Header.Set("Accept", strings.Join(accept, ","))
resp, err := f.Client.Do(req.WithContext(f.context))
if err != nil {
return nil, err
}
defer resp.Body.Close()
if err := transport.CheckError(resp, http.StatusOK); err != nil {
return nil, err
}
mediaType := types.MediaType(resp.Header.Get("Content-Type"))
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return nil, err
}
digest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
if err != nil {
return nil, err
}
// Validate the digest matches what we asked for, if pulling by digest.
if dgst, ok := ref.(name.Digest); ok {
if digest.String() != dgst.DigestStr() {
return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
}
}
// Return all this info since we have to calculate it anyway.
return &v1.Descriptor{
Digest: digest,
Size: size,
MediaType: mediaType,
}, nil
}
func (f *fetcher) fetchBlob(ctx context.Context, h v1.Hash) (io.ReadCloser, error) {
u := f.url("blobs", h.String())
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
resp, err := f.Client.Do(req.WithContext(f.context))
resp, err := f.Client.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
@ -294,7 +368,7 @@ func (f *fetcher) fetchBlob(h v1.Hash) (io.ReadCloser, error) {
return nil, err
}
return v1util.VerifyReadCloser(resp.Body, h)
return verify.ReadCloser(resp.Body, h)
}
func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {

View File

@ -21,12 +21,13 @@ import (
"net/url"
"sync"
"github.com/google/go-containerregistry/pkg/internal/redact"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/internal/verify"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/v1util"
)
var acceptableImageMediaTypes = []types.MediaType{
@ -99,7 +100,7 @@ func (r *remoteImage) RawConfigFile() ([]byte, error) {
return nil, err
}
body, err := r.fetchBlob(m.Config.Digest)
body, err := r.fetchBlob(r.context, m.Config.Digest)
if err != nil {
return nil, err
}
@ -142,6 +143,9 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
return nil, err
}
// We don't want to log binary layers -- this can break terminals.
ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs")
for _, s := range d.URLs {
u, err := url.Parse(s)
if err != nil {
@ -161,7 +165,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
return nil, err
}
resp, err := rl.ri.Client.Do(req.WithContext(rl.ri.context))
resp, err := rl.ri.Client.Do(req.WithContext(ctx))
if err != nil {
lastErr = err
continue
@ -173,7 +177,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
continue
}
return v1util.VerifyReadCloser(resp.Body, rl.digest)
return verify.ReadCloser(resp.Body, rl.digest)
}
return nil, lastErr

View File

@ -155,7 +155,7 @@ func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error)
return r.childDescriptor(childDesc, platform)
}
}
return nil, fmt.Errorf("no child with platform %s/%s in index %s", platform.Architecture, platform.OS, r.Ref)
return nil, fmt.Errorf("no child with platform %s/%s in index %s", platform.OS, platform.Architecture, r.Ref)
}
func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {

View File

@ -17,6 +17,7 @@ package remote
import (
"io"
"github.com/google/go-containerregistry/pkg/internal/redact"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
@ -31,7 +32,9 @@ type remoteLayer struct {
// Compressed implements partial.CompressedLayer
func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
return rl.fetchBlob(rl.digest)
// We don't want to log binary layers -- this can break terminals.
ctx := redact.NewContext(rl.context, "omitting binary blobs from logs")
return rl.fetchBlob(ctx, rl.digest)
}
// Compressed implements partial.CompressedLayer

View File

@ -44,7 +44,7 @@ func ListWithContext(ctx context.Context, repo name.Repository, options ...Optio
return nil, err
}
scopes := []string{repo.Scope(transport.PullScope)}
tr, err := transport.New(repo.Registry, o.auth, o.transport, scopes)
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
if err != nil {
return nil, err
}

View File

@ -0,0 +1,241 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package remote
import (
"fmt"
"net/http"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
"golang.org/x/sync/errgroup"
)
// MultiWrite writes the given Images or ImageIndexes to the given refs, as
// efficiently as possible, by deduping shared layer blobs and uploading layers
// in parallel, then uploading all manifests in parallel.
//
// Current limitations:
// - All refs must share the same repository.
// - Images cannot consist of stream.Layers.
func MultiWrite(m map[name.Reference]Taggable, options ...Option) error {
// Determine the repository being pushed to; if asked to push to
// multiple repositories, give up.
var repo, zero name.Repository
for ref := range m {
if repo == zero {
repo = ref.Context()
} else if ref.Context() != repo {
return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context())
}
}
// Collect unique blobs (layers and config blobs).
blobs := map[v1.Hash]v1.Layer{}
newManifests := []map[name.Reference]Taggable{}
// Separate originally requested images and indexes, so we can push images first.
images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{}
var err error
for ref, i := range m {
if img, ok := i.(v1.Image); ok {
images[ref] = i
if err := addImageBlobs(img, blobs); err != nil {
return err
}
continue
}
if idx, ok := i.(v1.ImageIndex); ok {
indexes[ref] = i
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0)
if err != nil {
return err
}
continue
}
return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i)
}
o, err := makeOptions(repo, options...)
if err != nil {
return err
}
// Determine if any of the layers are Mountable, because if so we need
// to request Pull scope too.
ls := []v1.Layer{}
for _, l := range blobs {
ls = append(ls, l)
}
scopes := scopesForUploadingImage(repo, ls)
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
if err != nil {
return err
}
w := writer{
repo: repo,
client: &http.Client{Transport: tr},
context: o.context,
}
// Upload individual blobs and collect any errors.
blobChan := make(chan v1.Layer, 2*o.jobs)
var g errgroup.Group
for i := 0; i < o.jobs; i++ {
// Start N workers consuming blobs to upload.
g.Go(func() error {
for b := range blobChan {
if err := w.uploadOne(b); err != nil {
return err
}
}
return nil
})
}
go func() {
for _, b := range blobs {
blobChan <- b
}
close(blobChan)
}()
if err := g.Wait(); err != nil {
return err
}
commitMany := func(m map[name.Reference]Taggable) error {
// With all of the constituent elements uploaded, upload the manifests
// to commit the images and indexes, and collect any errors.
type task struct {
i Taggable
ref name.Reference
}
taskChan := make(chan task, 2*o.jobs)
for i := 0; i < o.jobs; i++ {
// Start N workers consuming tasks to upload manifests.
g.Go(func() error {
for t := range taskChan {
if err := w.commitManifest(t.i, t.ref); err != nil {
return err
}
}
return nil
})
}
go func() {
for ref, i := range m {
taskChan <- task{i, ref}
}
close(taskChan)
}()
return g.Wait()
}
// Push originally requested image manifests. These have no
// dependencies.
if err := commitMany(images); err != nil {
return err
}
// Push new manifests from lowest levels up.
for i := len(newManifests) - 1; i >= 0; i-- {
if err := commitMany(newManifests[i]); err != nil {
return err
}
}
// Push originally requested index manifests, which might depend on
// newly discovered manifests.
return commitMany(indexes)
}
// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
// returns the latest copy of the ordered collection of manifests to upload.
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int) ([]map[name.Reference]Taggable, error) {
if lvl > len(newManifests)-1 {
newManifests = append(newManifests, map[name.Reference]Taggable{})
}
im, err := idx.IndexManifest()
if err != nil {
return nil, err
}
for _, desc := range im.Manifests {
switch desc.MediaType {
case types.OCIImageIndex, types.DockerManifestList:
idx, err := idx.ImageIndex(desc.Digest)
if err != nil {
return nil, err
}
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1)
if err != nil {
return nil, err
}
// Also track the sub-index manifest to upload later by digest.
newManifests[lvl][repo.Digest(desc.Digest.String())] = idx
case types.OCIManifestSchema1, types.DockerManifestSchema2:
img, err := idx.Image(desc.Digest)
if err != nil {
return nil, err
}
if err := addImageBlobs(img, blobs); err != nil {
return nil, err
}
// Also track the sub-image manifest to upload later by digest.
newManifests[lvl][repo.Digest(desc.Digest.String())] = img
default:
return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
}
}
return newManifests, nil
}
func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer) error {
ls, err := img.Layers()
if err != nil {
return err
}
// Collect all layers.
for _, l := range ls {
d, err := l.Digest()
if err != nil {
return err
}
// Ignore foreign layers.
mt, err := l.MediaType()
if err != nil {
return err
}
if !mt.IsDistributable() {
// TODO(jonjohnsonjr): Add "allow-nondistributable-artifacts" option.
continue
}
blobs[d] = l
}
// Collect config blob.
cl, err := partial.ConfigLayer(img)
if err != nil {
return err
}
cld, err := cl.Digest()
if err != nil {
return err
}
blobs[cld] = cl
return nil
}

View File

@ -16,6 +16,7 @@ package remote
import (
"context"
"errors"
"net/http"
"github.com/google/go-containerregistry/pkg/authn"
@ -33,14 +34,24 @@ type options struct {
transport http.RoundTripper
platform v1.Platform
context context.Context
jobs int
userAgent string
}
var defaultPlatform = v1.Platform{
Architecture: "amd64",
OS: "linux",
}
const defaultJobs = 4
func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
o := &options{
auth: authn.Anonymous,
transport: http.DefaultTransport,
platform: defaultPlatform,
context: context.Background(),
jobs: defaultJobs,
}
for _, option := range opts {
@ -70,6 +81,11 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
// Wrap the transport in something that can retry network flakes.
o.transport = transport.NewRetry(o.transport)
// Wrap this last to prevent transport.New from double-wrapping.
if o.userAgent != "" {
o.transport = transport.NewUserAgent(o.transport, o.userAgent)
}
return o, nil
}
@ -131,3 +147,29 @@ func WithContext(ctx context.Context) Option {
return nil
}
}
// WithJobs is a functional option for setting the parallelism of remote
// operations performed by a given function. Note that not all remote
// operations support parallelism.
//
// The default value is 4.
func WithJobs(jobs int) Option {
return func(o *options) error {
if jobs <= 0 {
return errors.New("jobs must be greater than zero")
}
o.jobs = jobs
return nil
}
}
// WithUserAgent adds the given string to the User-Agent header for any HTTP
// requests. This header will also include "go-containerregistry/${version}".
//
// If you want to completely overwrite the User-Agent header, use WithTransport.
func WithUserAgent(ua string) Option {
return func(o *options) error {
o.userAgent = ua
return nil
}
}
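A short sketch of composing these options on a read path; the reference string and User-Agent value are invented for illustration:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	ref, err := name.ParseReference("registry.example.com/app/web:latest")
	if err != nil {
		log.Fatal(err)
	}

	// The custom string is appended alongside, not substituted for, the
	// default "go-containerregistry/${version}" User-Agent.
	img, err := remote.Image(ref,
		remote.WithContext(ctx),
		remote.WithUserAgent("kaniko/demo"),
	)
	if err != nil {
		log.Fatal(err)
	}

	digest, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(digest)
}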

View File

@ -42,7 +42,7 @@ Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
That just uses the `docker/distribution` client... and more!
![containerd/containerd](../../../../images/containers.dot.svg)
![containers/image](../../../../images/containers.dot.svg)
> Wow, what about this package?

View File

@ -58,6 +58,5 @@ func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) {
}
}
}
in.Header.Set("User-Agent", transportName)
return bt.inner.RoundTrip(in)
}

View File

@ -15,6 +15,7 @@
package transport
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -23,7 +24,9 @@ import (
"net/url"
"strings"
authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/internal/redact"
"github.com/google/go-containerregistry/pkg/name"
)
@ -52,6 +55,14 @@ var portMap = map[string]string{
"https": "443",
}
func stringSet(ss []string) map[string]struct{} {
set := make(map[string]struct{})
for _, s := range ss {
set[s] = struct{}{}
}
return set
}
// RoundTrip implements http.RoundTripper
func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
sendRequest := func() (*http.Response, error) {
@ -72,9 +83,32 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
return nil, err
}
// Perform a token refresh() and retry the request in case the token has expired
if res.StatusCode == http.StatusUnauthorized {
if err = bt.refresh(); err != nil {
// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
for _, wac := range challenges {
// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
if scope, ok := wac.Parameters["scope"]; ok {
// From https://tools.ietf.org/html/rfc6750#section-3
// The "scope" attribute is defined in Section 3.3 of [RFC6749]. The
// "scope" attribute is a space-delimited list of case-sensitive scope
// values indicating the required scope of the access token for
// accessing the requested resource.
scopes := strings.Split(scope, " ")
// Add any scopes that we don't already request.
got := stringSet(bt.scopes)
for _, want := range scopes {
if _, ok := got[want]; !ok {
bt.scopes = append(bt.scopes, want)
}
}
}
}
// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
// Retry the request to attempt to get a valid token.
if err = bt.refresh(in.Context()); err != nil {
return nil, err
}
return sendRequest()
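The scope-widening step above, replayed as a standalone sketch with invented repository names:

package main

import (
	"fmt"
	"strings"
)

func stringSet(ss []string) map[string]struct{} {
	set := make(map[string]struct{})
	for _, s := range ss {
		set[s] = struct{}{}
	}
	return set
}

func main() {
	// Scopes the token was originally requested for.
	scopes := []string{"repository:app/web:pull"}

	// A 401 challenge asking for more, e.g. for a cross-repo blob mount.
	// Per RFC 6750 the attribute is a space-delimited list of scope values.
	challengeScope := "repository:app/web:pull,push repository:base/img:pull"

	got := stringSet(scopes)
	for _, want := range strings.Split(challengeScope, " ") {
		if _, ok := got[want]; !ok {
			scopes = append(scopes, want)
		}
	}
	fmt.Println(scopes)
	// [repository:app/web:pull repository:app/web:pull,push repository:base/img:pull]
}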
@ -87,7 +121,7 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
// so we rely on heuristics and fallbacks to support as many registries as possible.
// The basic token exchange is attempted first, falling back to the oauth flow.
// If the IdentityToken is set, this indicates that we should start with the oauth flow.
func (bt *bearerTransport) refresh() error {
func (bt *bearerTransport) refresh(ctx context.Context) error {
auth, err := bt.basic.Authorization()
if err != nil {
return err
@ -103,15 +137,15 @@ func (bt *bearerTransport) refresh() error {
// If the secret being stored is an identity token,
// the Username should be set to <token>, which indicates
// we are using an oauth flow.
content, err = bt.refreshOauth()
content, err = bt.refreshOauth(ctx)
if terr, ok := err.(*Error); ok && terr.StatusCode == http.StatusNotFound {
// Note: Not all token servers implement oauth2.
// If the request to the endpoint returns 404 using the HTTP POST method,
// refer to Token Documentation for using the HTTP GET method supported by all token servers.
content, err = bt.refreshBasic()
content, err = bt.refreshBasic(ctx)
}
} else {
content, err = bt.refreshBasic()
content, err = bt.refreshBasic(ctx)
}
if err != nil {
return err
@ -185,7 +219,7 @@ func canonicalAddress(host, scheme string) (address string) {
}
// https://docs.docker.com/registry/spec/auth/oauth/
func (bt *bearerTransport) refreshOauth() ([]byte, error) {
func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) {
auth, err := bt.basic.Authorization()
if err != nil {
return nil, err
@ -199,7 +233,7 @@ func (bt *bearerTransport) refreshOauth() ([]byte, error) {
v := url.Values{}
v.Set("scope", strings.Join(bt.scopes, " "))
v.Set("service", bt.service)
v.Set("client_id", transportName)
v.Set("client_id", defaultUserAgent)
if auth.IdentityToken != "" {
v.Set("grant_type", "refresh_token")
v.Set("refresh_token", auth.IdentityToken)
@ -212,7 +246,16 @@ func (bt *bearerTransport) refreshOauth() ([]byte, error) {
}
client := http.Client{Transport: bt.inner}
resp, err := client.PostForm(u.String(), v)
req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
// We don't want to log credentials.
ctx = redact.NewContext(ctx, "oauth token response contains credentials")
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
@ -226,7 +269,7 @@ func (bt *bearerTransport) refreshOauth() ([]byte, error) {
}
// https://docs.docker.com/registry/spec/auth/token/
func (bt *bearerTransport) refreshBasic() ([]byte, error) {
func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) {
u, err := url.Parse(bt.realm)
if err != nil {
return nil, err
@ -238,12 +281,20 @@ func (bt *bearerTransport) refreshBasic() ([]byte, error) {
}
client := http.Client{Transport: b}
u.RawQuery = url.Values{
"scope": bt.scopes,
"service": []string{bt.service},
}.Encode()
v := u.Query()
v["scope"] = bt.scopes
v.Set("service", bt.service)
u.RawQuery = v.Encode()
resp, err := client.Get(u.String())
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
// We don't want to log credentials.
ctx = redact.NewContext(ctx, "basic token response contains credentials")
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
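The switch from a fresh url.Values literal to u.Query() means query parameters already present on the token realm URL now survive. A minimal sketch with a made-up realm:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A token realm that already carries a query parameter.
	u, err := url.Parse("https://auth.example.com/token?account=alice")
	if err != nil {
		panic(err)
	}

	// Building on u.Query() keeps "account"; a fresh url.Values{} literal
	// would have dropped it.
	v := u.Query()
	v["scope"] = []string{"repository:app/web:pull"}
	v.Set("service", "registry.example.com")
	u.RawQuery = v.Encode()

	fmt.Println(u.String())
	// https://auth.example.com/token?account=alice&scope=repository%3Aapp%2Fweb%3Apull&service=registry.example.com
}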

View File

@ -28,16 +28,16 @@ import (
// from a redirect. These redirects often included tokens or signed URLs.
var paramWhitelist = map[string]struct{}{
// Token exchange
"scope": struct{}{},
"service": struct{}{},
"scope": {},
"service": {},
// Cross-repo mounting
"mount": struct{}{},
"from": struct{}{},
"mount": {},
"from": {},
// Layer PUT
"digest": struct{}{},
"digest": {},
// Listing tags and catalog
"n": struct{}{},
"last": struct{}{},
"n": {},
"last": {},
}
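This whitelist drives query redaction in error output. The redaction loop is truncated later in this diff, so the following sketch only approximates it; the "REDACTED" placeholder is an assumption:

package main

import (
	"fmt"
	"net/url"
)

var paramWhitelist = map[string]struct{}{
	"scope": {}, "service": {}, "mount": {}, "from": {}, "digest": {}, "n": {}, "last": {},
}

func redactURL(original *url.URL) *url.URL {
	qs := original.Query()
	for k, v := range qs {
		if _, ok := paramWhitelist[k]; ok {
			continue // keep known-safe parameters verbatim
		}
		for i := range v {
			v[i] = "REDACTED" // assumed placeholder
		}
	}
	redacted := *original
	redacted.RawQuery = qs.Encode()
	return &redacted
}

func main() {
	u, _ := url.Parse("https://r.example.com/v2/app/blobs/uploads/?digest=sha256:abc&X-Amz-Signature=secret")
	fmt.Println(redactURL(u))
	// https://r.example.com/v2/app/blobs/uploads/?X-Amz-Signature=REDACTED&digest=sha256%3Aabc
}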
// Error implements error to support the following error specification:
@ -59,7 +59,7 @@ var _ error = (*Error)(nil)
func (e *Error) Error() string {
prefix := ""
if e.request != nil {
prefix = fmt.Sprintf("%s %s: ", e.request.Method, redact(e.request.URL))
prefix = fmt.Sprintf("%s %s: ", e.request.Method, redactURL(e.request.URL))
}
return prefix + e.responseErr()
}
@ -68,9 +68,12 @@ func (e *Error) responseErr() string {
switch len(e.Errors) {
case 0:
if len(e.rawBody) == 0 {
return fmt.Sprintf("unsupported status code %d", e.StatusCode)
if e.request != nil && e.request.Method == http.MethodHead {
return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode))
}
return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode))
}
return fmt.Sprintf("unsupported status code %d; body: %s", e.StatusCode, e.rawBody)
return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody)
case 1:
return e.Errors[0].String()
default:
@ -96,7 +99,8 @@ func (e *Error) Temporary() bool {
return true
}
func redact(original *url.URL) *url.URL {
// TODO(jonjohnsonjr): Consider moving to pkg/internal/redact.
func redactURL(original *url.URL) *url.URL {
qs := original.Query()
for k, v := range qs {
for i := range v {
@ -153,8 +157,8 @@ const (
// TODO: Include other error types.
var temporaryErrorCodes = map[ErrorCode]struct{}{
BlobUploadInvalidErrorCode: struct{}{},
TooManyRequestsErrorCode: struct{}{},
BlobUploadInvalidErrorCode: {},
TooManyRequestsErrorCode: {},
}
// CheckError returns a structured error if the response status is not in codes.

View File

@ -1,10 +1,26 @@
// Copyright 2020 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package transport
import (
"fmt"
"net/http"
"net/http/httputil"
"time"
"github.com/google/go-containerregistry/pkg/internal/redact"
"github.com/google/go-containerregistry/pkg/logs"
)
@ -20,24 +36,55 @@ func NewLogger(inner http.RoundTripper) http.RoundTripper {
func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
// Inspired by: github.com/motemen/go-loghttp
logs.Debug.Printf("--> %s %s", in.Method, in.URL)
b, err := httputil.DumpRequestOut(in, true)
// We redact token responses and binary blobs in response/request.
omitBody, reason := redact.FromContext(in.Context())
if omitBody {
logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason)
} else {
logs.Debug.Printf("--> %s %s", in.Method, in.URL)
}
// Save these headers so we can redact Authorization.
savedHeaders := in.Header.Clone()
if in.Header != nil && in.Header.Get("authorization") != "" {
in.Header.Set("authorization", "<redacted>")
}
b, err := httputil.DumpRequestOut(in, !omitBody)
if err == nil {
logs.Debug.Println(string(b))
} else {
logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err)
}
// Restore the non-redacted headers.
in.Header = savedHeaders
start := time.Now()
out, err = t.inner.RoundTrip(in)
duration := time.Since(start)
if err != nil {
logs.Debug.Printf("<-- %v %s", err, in.URL)
logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration)
}
if out != nil {
msg := fmt.Sprintf("<-- %d", out.StatusCode)
if out.Request != nil {
msg = fmt.Sprintf("%s %s", msg, out.Request.URL)
}
msg = fmt.Sprintf("%s (%s)", msg, duration)
if omitBody {
msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason)
}
logs.Debug.Print(msg)
b, err := httputil.DumpResponse(out, true)
b, err := httputil.DumpResponse(out, !omitBody)
if err == nil {
logs.Debug.Println(string(b))
} else {
logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err)
}
}
return
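The pkg/internal/redact package is not shown in this diff. A plausible minimal implementation, inferred solely from the two call sites (redact.NewContext in the bearer transport, redact.FromContext here):

package redact

import "context"

type contextKey struct{}

// NewContext marks ctx so that request/response bodies flowing under it
// are omitted from debug logs, recording a human-readable reason.
func NewContext(ctx context.Context, reason string) context.Context {
	return context.WithValue(ctx, contextKey{}, reason)
}

// FromContext reports whether bodies under ctx should be redacted, and why.
func FromContext(ctx context.Context) (bool, string) {
	reason, ok := ctx.Value(contextKey{}).(string)
	return ok, reason
}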

View File

@ -15,12 +15,15 @@
package transport
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
"github.com/google/go-containerregistry/pkg/name"
)
@ -65,7 +68,7 @@ func parseChallenge(suffix string) map[string]string {
return kv
}
func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) {
client := http.Client{Transport: t}
// This first attempts to use "https" for every request, falling back to http
@ -76,12 +79,16 @@ func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
schemes = append(schemes, "http")
}
var connErr error
var errs []string
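// Collect one error per attempted scheme so a total failure reports
// both the https and the http attempt rather than only the last one.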
for _, scheme := range schemes {
url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
resp, err := client.Get(url)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
connErr = err
return nil, err
}
resp, err := client.Do(req.WithContext(ctx))
if err != nil {
errs = append(errs, err.Error())
// Potentially retry with http.
continue
}
@ -100,23 +107,23 @@ func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
scheme: scheme,
}, nil
case http.StatusUnauthorized:
wac := resp.Header.Get("WWW-Authenticate")
if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
// If there are two parts, then parse the challenge parameters.
if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
// If we hit more than one, I'm not even sure what to do.
wac := challenges[0]
return &pingResp{
challenge: challenge(parts[0]).Canonical(),
parameters: parseChallenge(parts[1]),
challenge: challenge(wac.Scheme).Canonical(),
parameters: wac.Parameters,
scheme: scheme,
}, nil
}
// Otherwise, just return the challenge without parameters.
return &pingResp{
challenge: challenge(wac).Canonical(),
challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
scheme: scheme,
}, nil
default:
return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
}
}
return nil, connErr
return nil, errors.New(strings.Join(errs, "; "))
}

View File

@ -15,6 +15,7 @@
package transport
import (
"context"
"fmt"
"net/http"
@ -25,7 +26,16 @@ import (
// New returns a new RoundTripper based on the provided RoundTripper that has been
// setup to authenticate with the remote registry "reg", in the capacity
// laid out by the specified scopes.
//
// TODO(jonjohnsonjr): Deprecate this.
func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
return NewWithContext(context.Background(), reg, auth, t, scopes)
}
// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been
// setup to authenticate with the remote registry "reg", in the capacity
// laid out by the specified scopes.
func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
// The handshake:
// 1. Use "t" to ping() the registry for the authentication challenge.
//
@ -40,19 +50,21 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope
// First we ping the registry to determine the parameters of the authentication handshake
// (if one is even necessary).
pr, err := ping(reg, t)
pr, err := ping(ctx, reg, t)
if err != nil {
return nil, err
}
// Wrap the given transport in transports that use an appropriate scheme,
// (based on the ping response) and set the user agent.
t = &useragentTransport{
inner: &schemeTransport{
scheme: pr.scheme,
registry: reg,
inner: t,
},
// Wrap t with a useragent transport unless we already have one.
if _, ok := t.(*userAgentTransport); !ok {
t = NewUserAgent(t, "")
}
// Wrap t in a transport that selects the appropriate scheme based on the ping response.
t = &schemeTransport{
scheme: pr.scheme,
registry: reg,
inner: t,
}
switch pr.challenge.Canonical() {
@ -81,7 +93,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope
scopes: scopes,
scheme: pr.scheme,
}
if err := bt.refresh(); err != nil {
if err := bt.refresh(ctx); err != nil {
return nil, err
}
return bt, nil
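A sketch of wiring NewWithContext into a plain http.Client; the registry name and scope string are invented, with the scope following the Docker token-spec "repository:<name>:<actions>" form:

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com")
	if err != nil {
		log.Fatal(err)
	}

	// The handshake (ping plus any token refresh) runs here, under ctx.
	ctx := context.Background()
	rt, err := transport.NewWithContext(ctx, reg, authn.Anonymous, http.DefaultTransport,
		[]string{"repository:app/web:pull"})
	if err != nil {
		log.Fatal(err)
	}

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://registry.example.com/v2/app/web/tags/list")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}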
