Updated PR Branch with master

parent 8b459b57aa
commit fa2a2c803b
@@ -445,7 +445,7 @@
   version = "v0.2.0"

 [[projects]]
-  digest = "1:f1b23f53418c1b035a5965ac2600a28b16c08643683d5213fb581ecf4e79a02a"
+  digest = "1:d40a26f0daf07f3b5c916356a3e10fabbf97d5166f77e57aa3983013ab57004c"
   name = "github.com/google/go-containerregistry"
   packages = [
     "pkg/authn",
@@ -459,12 +459,13 @@
     "pkg/v1/random",
     "pkg/v1/remote",
     "pkg/v1/remote/transport",
+    "pkg/v1/stream",
     "pkg/v1/tarball",
     "pkg/v1/types",
     "pkg/v1/v1util",
   ]
   pruneopts = "NUT"
-  revision = "88d8d18eb1bde1fcef23c745205c738074290515"
+  revision = "8621d738a07bc74b2adeafd175a3c738423577a0"

 [[projects]]
   digest = "1:f4f203acd8b11b8747bdcd91696a01dbc95ccb9e2ca2db6abf81c3a4f5e950ce"
@@ -726,6 +727,14 @@
   revision = "c37440a7cf42ac63b919c752ca73a85067e05992"
   version = "v0.2.0"

+[[projects]]
+  branch = "master"
+  digest = "1:15057fc7395024283a7d2639b8afc61c5b6df3fe260ce06ff5834c8464f16b5c"
+  name = "github.com/otiai10/copy"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "7e9a647135a142c2669943d4a4d29be015ce9392"
+
 [[projects]]
   branch = "master"
   digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
@@ -1265,7 +1274,7 @@
   version = "kubernetes-1.11.0"

 [[projects]]
-  digest = "1:b960fc62d636ccdc3265dd1e190b7f5e7bf5f8d29bf4f02af7f1352768c58f3f"
+  digest = "1:2f523dd16b56091fab1f329f772c3540742920e270bf0f9b8451106b7f005a66"
   name = "k8s.io/client-go"
   packages = [
     "discovery",
@@ -1317,8 +1326,8 @@
     "util/integer",
   ]
   pruneopts = "NUT"
-  revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
-  version = "kubernetes-1.11.0"
+  revision = "2cefa64ff137e128daeddbd1775cd775708a05bf"
+  version = "kubernetes-1.11.3"

 [[projects]]
   digest = "1:e345c95cf277bb7f650306556904df69e0904395c56959a56002d0140747eda0"
@@ -1366,6 +1375,7 @@
     "github.com/moby/buildkit/frontend/dockerfile/instructions",
     "github.com/moby/buildkit/frontend/dockerfile/parser",
     "github.com/moby/buildkit/frontend/dockerfile/shell",
+    "github.com/otiai10/copy",
     "github.com/pkg/errors",
     "github.com/sirupsen/logrus",
     "github.com/spf13/cobra",
@@ -33,11 +33,11 @@ required = [

 [[constraint]]
   name = "k8s.io/client-go"
-  version = "kubernetes-1.11.0"
+  version = "kubernetes-1.11.3"

 [[constraint]]
   name = "github.com/google/go-containerregistry"
-  revision = "88d8d18eb1bde1fcef23c745205c738074290515"
+  revision = "8621d738a07bc74b2adeafd175a3c738423577a0"

 [[override]]
   name = "k8s.io/apimachinery"
README.md: 23 lines changed
@@ -366,7 +366,7 @@ You can set it multiple times for multiple registries.

 #### --skip-tls-verify-registry

-Set this flag to skip TLS cerificate validation when accessing a registry. It is supposed to be useed for testing purposes only and should not be used in production!
+Set this flag to skip TLS cerificate validation when accessing a registry. It is supposed to be used for testing purposes only and should not be used in production!
 You can set it multiple times for multiple registries.

 #### --cleanup
@@ -395,7 +395,11 @@ This flag takes a single snapshot of the filesystem at the end of the build, so

 #### --skip-tls-verify

-Set this flag to skip TLS certificate validation when connecting to a registry. It is supposed to be used for testing purposes only and should not be used in production!
+Set this flag to skip TLS certificate validation when pushing to a registry. It is supposed to be used for testing purposes only and should not be used in production!
+
+#### --skip-tls-verify-pull
+
+Set this flag to skip TLS certificate validation when pulling from a registry. It is supposed to be used for testing purposes only and should not be used in production!

 #### --snapshotMode
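The hunk above splits the old catch-all flag into separate push and pull controls. As a rough sketch of how such a pair of booleans is typically wired up with cobra (which kaniko's CLI uses) — the flag names follow the README text, everything else is illustrative rather than kaniko's actual root command:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

var (
	skipTLSVerifyPush bool // --skip-tls-verify: applies to the push path only
	skipTLSVerifyPull bool // --skip-tls-verify-pull: applies to the pull path only
)

func main() {
	cmd := &cobra.Command{
		Use: "executor",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("insecure push:", skipTLSVerifyPush, "insecure pull:", skipTLSVerifyPull)
		},
	}
	cmd.PersistentFlags().BoolVar(&skipTLSVerifyPush, "skip-tls-verify", false, "skip TLS certificate validation when pushing (testing only)")
	cmd.PersistentFlags().BoolVar(&skipTLSVerifyPull, "skip-tls-verify-pull", false, "skip TLS certificate validation when pulling (testing only)")
	cmd.Execute()
}
```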
@@ -471,12 +475,15 @@ filesystem is sufficiently complicated). However it has no `Dockerfile`-like
 build tooling (it's a slightly lower-level tool that can be used to build such
 builders -- such as `orca-build`).

-`Buildah` can run as a non root user and does not require privileges. Buildah
-specializes in building OCI images. Buildah's commands replicate all of the
-commands that are found in a Dockerfile. Its goal is also to provide a lower
-level coreutils interface to build images, allowing people to build containers
-without requiring a Dockerfile. The intent with Buildah is to allow other
-scripting languages to build container images, without requiring a daemon.
+`Buildah` specializes in building OCI images. Buildah's commands replicate all
+of the commands that are found in a Dockerfile. This allows building images
+with and without Dockerfiles while not requiring any root privileges.
+Buildah’s ultimate goal is to provide a lower-level coreutils interface to
+build images. The flexibility of building images without Dockerfiles allows
+for the integration of other scripting languages into the build process.
+Buildah follows a simple fork-exec model and does not run as a daemon
+but it is based on a comprehensive API in golang, which can be vendored
+into other tools.

 `FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images
 for a subset of images. These can be thought of as a special-case "fast path"
@@ -24,12 +24,11 @@ import (
 	"strings"
 	"time"

-	"github.com/GoogleContainerTools/kaniko/pkg/timing"
-
 	"github.com/GoogleContainerTools/kaniko/pkg/buildcontext"
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
 	"github.com/GoogleContainerTools/kaniko/pkg/executor"
+	"github.com/GoogleContainerTools/kaniko/pkg/timing"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/genuinetools/amicontained/container"
 	"github.com/pkg/errors"
@@ -79,6 +78,9 @@ var RootCmd = &cobra.Command{
 		}
 		logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
 	}
+	if err := executor.CheckPushPermissions(opts); err != nil {
+		exit(errors.Wrap(err, "error checking push permissions -- make sure you entered the correct tag name, and that you are authenticated correctly, and try again"))
+	}
 	if err := os.Chdir("/"); err != nil {
 		exit(errors.Wrap(err, "error changing to root dir"))
 	}
@@ -24,4 +24,7 @@ COPY $file /arg

 # Finally, test adding a remote URL, concurrently with a normal file
 ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/
-ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /destination
+
+# Test environment replacement in the URL
+ENV VERSION=v1.4.3
+ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/${VERSION}-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /destination
@@ -0,0 +1,12 @@
+ARG FILE_NAME=myFile
+
+FROM busybox:latest AS builder
+ARG FILE_NAME
+
+RUN echo $FILE_NAME && touch /$FILE_NAME.txt && stat /$FILE_NAME.txt;
+
+FROM busybox:latest
+ARG FILE_NAME
+
+RUN echo $FILE_NAME && touch /$FILE_NAME.txt && stat /$FILE_NAME.txt;
+COPY --from=builder /$FILE_NAME.txt /
@@ -0,0 +1,8 @@
+# Test to make sure the cache works with special file permissions properly.
+# If the image is built twice, directory foo should have the sticky bit,
+# and file bar should have the setuid and setgid bits.
+
+FROM busybox
+
+RUN mkdir foo && chmod +t foo
+RUN touch bar && chmod u+s,g+s bar
@@ -0,0 +1,8 @@
+FROM busybox
+
+RUN adduser --disabled-password --gecos "" --uid 1000 user
+RUN mkdir -p /home/user/foo
+RUN chown -R user /home/user
+RUN chmod 700 /home/user/foo
+ADD https://raw.githubusercontent.com/GoogleContainerTools/kaniko/master/README.md /home/user/foo/README.md
+RUN chown -R user /home/user
@@ -0,0 +1,17 @@
+# Copyright 2018 Google, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM gcr.io/google-appengine/debian9@sha256:1d6a9a6d106bd795098f60f4abb7083626354fa6735e81743c7f8cfca11259f0
+
+USER testuser:testgroup
@@ -134,6 +134,7 @@ func NewDockerFileBuilder(dockerfiles []string) *DockerFileBuilder {
 	d.TestCacheDockerfiles = map[string]struct{}{
 		"Dockerfile_test_cache":         {},
 		"Dockerfile_test_cache_install": {},
+		"Dockerfile_test_cache_perm":    {},
 	}
 	return &d
 }
@@ -18,6 +18,7 @@ package cache

 import (
 	"fmt"
+	"io/ioutil"
 	"path"

 	"github.com/GoogleContainerTools/kaniko/pkg/config"
@@ -53,6 +54,15 @@ func WarmCache(opts *config.WarmerOptions) error {
 		if err != nil {
 			return errors.Wrap(err, fmt.Sprintf("Failed to write %s to cache", image))
 		}
+
+		mfst, err := img.RawManifest()
+		if err != nil {
+			return errors.Wrap(err, fmt.Sprintf("Failed to retrieve manifest for %s", image))
+		}
+		mfstPath := cachePath + ".json"
+		if err := ioutil.WriteFile(mfstPath, mfst, 0666); err != nil {
+			return errors.Wrap(err, fmt.Sprintf("Failed to save manifest for %s", image))
+		}
 		logrus.Debugf("Wrote %s to cache", image)
 	}
 	return nil
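The hunk above makes the warmer save an image's raw manifest next to its cached tarball. A minimal stdlib-only sketch of the resulting on-disk layout — `writeCacheEntry` and its arguments are hypothetical names for illustration, not kaniko's API:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// writeCacheEntry stores an image tarball at <cacheDir>/<digest> and its
// raw manifest alongside it at <cacheDir>/<digest>.json, mirroring the
// cachePath + ".json" convention in the hunk above.
func writeCacheEntry(cacheDir, digest string, image, manifest []byte) error {
	cachePath := filepath.Join(cacheDir, digest)
	if err := ioutil.WriteFile(cachePath, image, 0644); err != nil {
		return fmt.Errorf("failed to write %s to cache: %v", digest, err)
	}
	// The manifest lives beside the tarball so later runs can inspect it
	// without unpacking the image.
	return ioutil.WriteFile(cachePath+".json", manifest, 0666)
}

func main() {
	if err := writeCacheEntry("/tmp/cache", "sha256-abc", []byte("tar"), []byte("{}")); err != nil {
		fmt.Println(err)
	}
}
```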
@@ -47,7 +47,7 @@ type AddCommand struct {
 func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
 	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)

-	srcs, dest, err := resolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs)
+	srcs, dest, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs)
 	if err != nil {
 		return err
 	}
|
||||||
for _, src := range srcs {
|
for _, src := range srcs {
|
||||||
fullPath := filepath.Join(a.buildcontext, src)
|
fullPath := filepath.Join(a.buildcontext, src)
|
||||||
if util.IsSrcRemoteFileURL(src) {
|
if util.IsSrcRemoteFileURL(src) {
|
||||||
urlDest := util.URLDestinationFilepath(src, dest, config.WorkingDir)
|
urlDest, err := util.URLDestinationFilepath(src, dest, config.WorkingDir, replacementEnvs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
logrus.Infof("Adding remote URL %s to %s", src, urlDest)
|
logrus.Infof("Adding remote URL %s to %s", src, urlDest)
|
||||||
if err := util.DownloadFileToDest(src, urlDest); err != nil {
|
if err := util.DownloadFileToDest(src, urlDest); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
@ -111,7 +114,7 @@ func (a *AddCommand) String() string {
|
||||||
func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) {
|
func (a *AddCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerfile.BuildArgs) ([]string, error) {
|
||||||
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
||||||
|
|
||||||
srcs, _, err := resolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs)
|
srcs, _, err := util.ResolveEnvAndWildcards(a.cmd.SourcesAndDest, a.buildcontext, replacementEnvs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -30,27 +30,35 @@ type ArgCommand struct {

 // ExecuteCommand only needs to add this ARG key/value as seen
 func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
-	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
-	resolvedKey, err := util.ResolveEnvironmentReplacement(r.cmd.Key, replacementEnvs, false)
+	key, val, err := ParseArg(r.cmd.Key, r.cmd.Value, config.Env, buildArgs)
 	if err != nil {
 		return err
 	}
+	buildArgs.AddArg(key, val)
+	return nil
+}
+
+func ParseArg(key string, val *string, env []string, ba *dockerfile.BuildArgs) (string, *string, error) {
+	replacementEnvs := ba.ReplacementEnvs(env)
+	resolvedKey, err := util.ResolveEnvironmentReplacement(key, replacementEnvs, false)
+	if err != nil {
+		return "", nil, err
+	}
 	var resolvedValue *string
-	if r.cmd.Value != nil {
-		value, err := util.ResolveEnvironmentReplacement(*r.cmd.Value, replacementEnvs, false)
+	if val != nil {
+		value, err := util.ResolveEnvironmentReplacement(*val, replacementEnvs, false)
 		if err != nil {
-			return err
+			return "", nil, err
 		}
 		resolvedValue = &value
 	} else {
-		meta := buildArgs.GetAllMeta()
+		meta := ba.GetAllMeta()
 		if value, ok := meta[resolvedKey]; ok {
 			resolvedValue = &value
 		}
 	}
-	buildArgs.AddArg(resolvedKey, resolvedValue)
-
-	return nil
+	return resolvedKey, resolvedValue, nil
 }

 // String returns some information about the command for the image config history
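Factoring `ParseArg` out of `ExecuteCommand` lets ARG resolution be reused outside command execution (the cross-stage dependency pre-pass in pkg/executor calls it). A stdlib-only sketch of the same resolve-key-then-value flow; `resolve` and `parseArg` here are hypothetical stand-ins, not kaniko's helpers:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// resolve expands $VAR / ${VAR} references against KEY=VALUE pairs.
func resolve(s string, envs []string) string {
	return os.Expand(s, func(k string) string {
		for _, e := range envs {
			if kv := strings.SplitN(e, "=", 2); len(kv) == 2 && kv[0] == k {
				return kv[1]
			}
		}
		return ""
	})
}

// parseArg mirrors the resolve-key-then-value order of ParseArg above.
func parseArg(key string, val *string, envs []string) (string, *string) {
	k := resolve(key, envs)
	if val == nil {
		// The real code falls back to meta-ARG values here.
		return k, nil
	}
	v := resolve(*val, envs)
	return k, &v
}

func main() {
	v := "${base}/file.txt"
	k, rv := parseArg("myFile", &v, []string{"base=/tmp"})
	fmt.Println(k, *rv) // myFile /tmp/file.txt
}
```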
@@ -45,7 +45,7 @@ func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.Bu

 	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)

-	srcs, dest, err := resolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
+	srcs, dest, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
 	if err != nil {
 		return err
 	}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveEnvAndWildcards(sd instructions.SourcesAndDest, buildcontext string, envs []string) ([]string, string, error) {
|
|
||||||
// First, resolve any environment replacement
|
|
||||||
resolvedEnvs, err := util.ResolveEnvironmentReplacementList(sd, envs, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
dest := resolvedEnvs[len(resolvedEnvs)-1]
|
|
||||||
// Resolve wildcards and get a list of resolved sources
|
|
||||||
srcs, err := util.ResolveSources(resolvedEnvs, buildcontext)
|
|
||||||
return srcs, dest, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilesToSnapshot should return an empty array if still nil; no files were changed
|
// FilesToSnapshot should return an empty array if still nil; no files were changed
|
||||||
func (c *CopyCommand) FilesToSnapshot() []string {
|
func (c *CopyCommand) FilesToSnapshot() []string {
|
||||||
return c.snapshotFiles
|
return c.snapshotFiles
|
||||||
|
|
@ -129,7 +117,7 @@ func (c *CopyCommand) FilesUsedFromContext(config *v1.Config, buildArgs *dockerf
|
||||||
}
|
}
|
||||||
|
|
||||||
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
|
||||||
srcs, _, err := resolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
|
srcs, _, err := util.ResolveEnvAndWildcards(c.cmd.SourcesAndDest, c.buildcontext, replacementEnvs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -31,10 +31,6 @@ type UserCommand struct {
 	cmd *instructions.UserCommand
 }

-func (r *UserCommand) RequiresUnpackedFS() bool {
-	return true
-}
-
 func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
 	logrus.Info("cmd: USER")
 	u := r.cmd.User
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _, err = util.GetUserFromUsername(userStr, groupStr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if groupStr != "" {
|
if groupStr != "" {
|
||||||
userStr = userStr + ":" + groupStr
|
userStr = userStr + ":" + groupStr
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -28,57 +28,42 @@ import (
 var userTests = []struct {
 	user        string
 	expectedUID string
-	shouldError bool
 }{
 	{
 		user:        "root",
 		expectedUID: "root",
-		shouldError: false,
 	},
 	{
 		user:        "0",
 		expectedUID: "0",
-		shouldError: false,
 	},
 	{
 		user:        "fakeUser",
-		expectedUID: "",
-		shouldError: true,
+		expectedUID: "fakeUser",
 	},
 	{
 		user:        "root:root",
 		expectedUID: "root:root",
-		shouldError: false,
 	},
 	{
 		user:        "0:root",
 		expectedUID: "0:root",
-		shouldError: false,
 	},
 	{
 		user:        "root:0",
 		expectedUID: "root:0",
-		shouldError: false,
 	},
 	{
 		user:        "0:0",
 		expectedUID: "0:0",
-		shouldError: false,
-	},
-	{
-		user:        "root:fakeGroup",
-		expectedUID: "",
-		shouldError: true,
 	},
 	{
 		user:        "$envuser",
 		expectedUID: "root",
-		shouldError: false,
 	},
 	{
 		user:        "root:$envgroup",
 		expectedUID: "root:root",
-		shouldError: false,
 	},
 }
@@ -97,6 +82,6 @@ func TestUpdateUser(t *testing.T) {
 		}
 		buildArgs := dockerfile.NewBuildArgs([]string{})
 		err := cmd.ExecuteCommand(cfg, buildArgs)
-		testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUID, cfg.User)
+		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedUID, cfg.User)
 	}
 }
@@ -26,4 +26,5 @@ type KanikoStage struct {
 	BaseImageStoredLocally bool
 	SaveStage              bool
 	MetaArgs               []instructions.ArgCommand
+	Index                  int
 }
@@ -25,6 +25,8 @@ import (
 	"strconv"
 	"strings"

+	"github.com/sirupsen/logrus"
+
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
@@ -67,6 +69,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
 			return nil, errors.Wrap(err, "resolving base name")
 		}
 		stage.Name = resolvedBaseName
+		logrus.Infof("Resolved base name %s to %s", stage.BaseName, stage.Name)
 		kanikoStages = append(kanikoStages, config.KanikoStage{
 			Stage:          stage,
 			BaseImageIndex: baseImageIndex(index, stages),
@@ -74,6 +77,7 @@ func Stages(opts *config.KanikoOptions) ([]config.KanikoStage, error) {
 			SaveStage: saveStage(index, stages),
 			Final:     index == targetStage,
 			MetaArgs:  metaArgs,
+			Index:     index,
 		})
 		if index == targetStage {
 			break
@@ -175,14 +179,6 @@ func saveStage(index int, stages []instructions.Stage) bool {
 				return true
 			}
 		}
-		for _, cmd := range stage.Commands {
-			switch c := cmd.(type) {
-			case *instructions.CopyCommand:
-				if c.From == strconv.Itoa(index) {
-					return true
-				}
-			}
-		}
 	}
 	return false
 }
@@ -114,7 +114,7 @@ func Test_SaveStage(t *testing.T) {
 		{
 			name:     "reference stage in later copy command",
 			index:    0,
-			expected: true,
+			expected: false,
 		},
 		{
 			name: "reference stage in later from command",
@@ -23,6 +23,8 @@ import (
 	"strconv"
 	"time"

+	"github.com/otiai10/copy"
+
 	"github.com/google/go-containerregistry/pkg/v1/partial"

 	"github.com/moby/buildkit/frontend/dockerfile/instructions"
@@ -60,10 +62,11 @@ type stageBuilder struct {
 	opts            *config.KanikoOptions
 	cmds            []commands.DockerCommand
 	args            *dockerfile.BuildArgs
+	crossStageDeps  map[int][]string
 }

 // newStageBuilder returns a new type stageBuilder which contains all the information required to build the stage
-func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*stageBuilder, error) {
+func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage, crossStageDeps map[int][]string) (*stageBuilder, error) {
 	sourceImage, err := util.RetrieveSourceImage(stage, opts)
 	if err != nil {
 		return nil, err
@@ -96,6 +99,7 @@ func newStageBuilder(opts *config.KanikoOptions, stage config.KanikoStage) (*sta
 		snapshotter:     snapshotter,
 		baseImageDigest: digest.String(),
 		opts:            opts,
+		crossStageDeps:  crossStageDeps,
 	}

 	for _, cmd := range s.stage.Commands {
@@ -207,6 +211,10 @@ func (s *stageBuilder) build() error {
 			break
 		}
 	}
+	if len(s.crossStageDeps[s.stage.Index]) > 0 {
+		shouldUnpack = true
+	}
+
 	if shouldUnpack {
 		t := timing.Start("FS Unpacking")
 		if _, err := util.GetFSFromImage(constants.RootDir, s.image); err != nil {
@@ -353,6 +361,68 @@ func (s *stageBuilder) saveSnapshotToImage(createdBy string, tarPath string) err

 }

+func CalculateDependencies(opts *config.KanikoOptions) (map[int][]string, error) {
+	stages, err := dockerfile.Stages(opts)
+	if err != nil {
+		return nil, err
+	}
+	images := []v1.Image{}
+	depGraph := map[int][]string{}
+	for _, s := range stages {
+		ba := dockerfile.NewBuildArgs(opts.BuildArgs)
+		ba.AddMetaArgs(s.MetaArgs)
+		var image v1.Image
+		var err error
+		if s.BaseImageStoredLocally {
+			image = images[s.BaseImageIndex]
+		} else if s.Name == constants.NoBaseImage {
+			image = empty.Image
+		} else {
+			image, err = util.RetrieveSourceImage(s, opts)
+			if err != nil {
+				return nil, err
+			}
+		}
+		cfg, err := initializeConfig(image)
+		if err != nil {
+			return nil, err
+		}
+		for _, c := range s.Commands {
+			switch cmd := c.(type) {
+			case *instructions.CopyCommand:
+				if cmd.From != "" {
+					i, err := strconv.Atoi(cmd.From)
+					if err != nil {
+						continue
+					}
+					resolved, err := util.ResolveEnvironmentReplacementList(cmd.SourcesAndDest, ba.ReplacementEnvs(cfg.Config.Env), true)
+					if err != nil {
+						return nil, err
+					}
+
+					depGraph[i] = append(depGraph[i], resolved[0:len(resolved)-1]...)
+				}
+			case *instructions.EnvCommand:
+				if err := util.UpdateConfigEnv(cmd.Env, &cfg.Config, ba.ReplacementEnvs(cfg.Config.Env)); err != nil {
+					return nil, err
+				}
+				image, err = mutate.Config(image, cfg.Config)
+				if err != nil {
+					return nil, err
+				}
+			case *instructions.ArgCommand:
+				k, v, err := commands.ParseArg(cmd.Key, cmd.Value, cfg.Config.Env, ba)
+				if err != nil {
+					return nil, err
+				}
+				ba.AddArg(k, v)
+			}
+		}
+		images = append(images, image)
+	}
+	return depGraph, nil
+}
+
 // DoBuild executes building the Dockerfile
 func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 	t := timing.Start("Total Build Time")
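The pre-pass above walks every stage and records, per stage index, which paths later stages pull out with COPY --from. A minimal sketch of that bookkeeping idea in isolation — the input types here are toys standing in for the parsed Dockerfile commands:

```go
package main

import "fmt"

// copyCmd is a toy stand-in for a parsed COPY --from instruction.
type copyCmd struct {
	from int      // index of the stage being copied from
	srcs []string // source paths inside that stage
}

// buildDepGraph records, for each referenced stage, the files that later
// stages need, so only those files must survive filesystem deletion.
func buildDepGraph(cmds []copyCmd) map[int][]string {
	depGraph := map[int][]string{}
	for _, c := range cmds {
		depGraph[c.from] = append(depGraph[c.from], c.srcs...)
	}
	return depGraph
}

func main() {
	g := buildDepGraph([]copyCmd{
		{from: 0, srcs: []string{"/foo"}},
		{from: 0, srcs: []string{"/baz"}},
		{from: 1, srcs: []string{"/bar"}},
	})
	fmt.Println(g) // map[0:[/foo /baz] 1:[/bar]]
}
```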
@@ -369,8 +439,14 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		return nil, err
 	}

+	crossStageDependencies, err := CalculateDependencies(opts)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Infof("Built cross stage deps: %v", crossStageDependencies)
+
 	for index, stage := range stages {
-		sb, err := newStageBuilder(opts, stage)
+		sb, err := newStageBuilder(opts, stage, crossStageDependencies)
 		if err != nil {
 			return nil, err
 		}
@@ -405,10 +481,21 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 			if err := saveStageAsTarball(strconv.Itoa(index), sourceImage); err != nil {
 				return nil, err
 			}
-			if err := extractImageToDependecyDir(strconv.Itoa(index), sourceImage); err != nil {
-				return nil, err
-			}
 		}
+
+		filesToSave, err := filesToSave(crossStageDependencies[index])
+		if err != nil {
+			return nil, err
+		}
+		dstDir := filepath.Join(constants.KanikoDir, strconv.Itoa(index))
+		if err := os.MkdirAll(dstDir, 0644); err != nil {
+			return nil, err
+		}
+		for _, p := range filesToSave {
+			logrus.Infof("Saving file %s for later use.", p)
+			copy.Copy(p, filepath.Join(dstDir, p))
+		}
+
 		// Delete the filesystem
 		if err := util.DeleteFilesystem(); err != nil {
 			return nil, err
@@ -418,6 +505,18 @@ func DoBuild(opts *config.KanikoOptions) (v1.Image, error) {
 		return nil, err
 	}

+func filesToSave(deps []string) ([]string, error) {
+	allFiles := []string{}
+	for _, src := range deps {
+		srcs, err := filepath.Glob(src)
+		if err != nil {
+			return nil, err
+		}
+		allFiles = append(allFiles, srcs...)
+	}
+	return allFiles, nil
+}
+
 func fetchExtraStages(stages []config.KanikoStage, opts *config.KanikoOptions) error {
 	t := timing.Start("Fetching Extra Stages")
 	defer timing.DefaultRun.Stop(t)
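filesToSave expands each recorded dependency with filepath.Glob, so ARG/ENV-resolved patterns like /tmp/foo*.txt match concrete files. Worth noting how Glob behaves, since the loop relies on it — a quick stdlib demonstration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Glob returns only paths that actually exist and match; a pattern
	// with no matches yields an empty slice, not an error.
	matches, err := filepath.Glob("/etc/host*")
	if err != nil {
		// An error is only returned for malformed patterns.
		fmt.Println("bad pattern:", err)
		return
	}
	fmt.Println(matches) // e.g. [/etc/host.conf /etc/hostname /etc/hosts]
}
```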
@@ -17,14 +17,19 @@ limitations under the License.
 package executor

 import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
 	"testing"

-	"github.com/moby/buildkit/frontend/dockerfile/instructions"
-
 	"github.com/GoogleContainerTools/kaniko/pkg/config"
 	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
 	"github.com/GoogleContainerTools/kaniko/testutil"
-	"github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-cmp/cmp"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/moby/buildkit/frontend/dockerfile/instructions"
 )

 func Test_reviewConfig(t *testing.T) {
@@ -180,3 +185,223 @@ func Test_stageBuilder_shouldTakeSnapshot(t *testing.T) {
 		})
 	}
 }
+
+func TestCalculateDependencies(t *testing.T) {
+	type args struct {
+		dockerfile string
+	}
+	tests := []struct {
+		name string
+		args args
+		want map[int][]string
+	}{
+		{
+			name: "no deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+RUN foo
+FROM stage1
+RUN bar
+`,
+			},
+			want: map[int][]string{},
+		},
+		{
+			name: "args",
+			args: args{
+				dockerfile: `
+ARG myFile=foo
+FROM debian as stage1
+RUN foo
+FROM stage1
+ARG myFile
+COPY --from=stage1 /tmp/$myFile.txt .
+RUN bar
+`,
+			},
+			want: map[int][]string{
+				0: {"/tmp/foo.txt"},
+			},
+		},
+		{
+			name: "simple deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+FROM alpine
+COPY --from=stage1 /foo /bar
+`,
+			},
+			want: map[int][]string{
+				0: {"/foo"},
+			},
+		},
+		{
+			name: "two sets deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+FROM ubuntu as stage2
+RUN foo
+COPY --from=stage1 /foo /bar
+FROM alpine
+COPY --from=stage2 /bar /bat
+`,
+			},
+			want: map[int][]string{
+				0: {"/foo"},
+				1: {"/bar"},
+			},
+		},
+		{
+			name: "double deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+FROM ubuntu as stage2
+RUN foo
+COPY --from=stage1 /foo /bar
+FROM alpine
+COPY --from=stage1 /baz /bat
+`,
+			},
+			want: map[int][]string{
+				0: {"/foo", "/baz"},
+			},
+		},
+		{
+			name: "envs in deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+FROM ubuntu as stage2
+RUN foo
+ENV key1 val1
+ENV key2 val2
+COPY --from=stage1 /foo/$key1 /foo/$key2 /bar
+FROM alpine
+COPY --from=stage2 /bar /bat
+`,
+			},
+			want: map[int][]string{
+				0: {"/foo/val1", "/foo/val2"},
+				1: {"/bar"},
+			},
+		},
+		{
+			name: "envs from base image in deps",
+			args: args{
+				dockerfile: `
+FROM debian as stage1
+ENV key1 baseval1
+FROM stage1 as stage2
+RUN foo
+ENV key2 val2
+COPY --from=stage1 /foo/$key1 /foo/$key2 /bar
+FROM alpine
+COPY --from=stage2 /bar /bat
+`,
+			},
+			want: map[int][]string{
+				0: {"/foo/baseval1", "/foo/val2"},
+				1: {"/bar"},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			f, _ := ioutil.TempFile("", "")
+			ioutil.WriteFile(f.Name(), []byte(tt.args.dockerfile), 0755)
+			opts := &config.KanikoOptions{
+				DockerfilePath: f.Name(),
+			}
+
+			got, err := CalculateDependencies(opts)
+			if err != nil {
+				t.Errorf("got error: %s,", err)
+			}
+
+			if !reflect.DeepEqual(got, tt.want) {
+				diff := cmp.Diff(got, tt.want)
+				t.Errorf("CalculateDependencies() = %v, want %v, diff %v", got, tt.want, diff)
+			}
+		})
+	}
+}
+
+func Test_filesToSave(t *testing.T) {
+	tests := []struct {
+		name  string
+		args  []string
+		want  []string
+		files []string
+	}{
+		{
+			name:  "simple",
+			args:  []string{"foo"},
+			files: []string{"foo"},
+			want:  []string{"foo"},
+		},
+		{
+			name:  "glob",
+			args:  []string{"foo*"},
+			files: []string{"foo", "foo2", "fooooo", "bar"},
+			want:  []string{"foo", "foo2", "fooooo"},
+		},
+		{
+			name:  "complex glob",
+			args:  []string{"foo*", "bar?"},
+			files: []string{"foo", "foo2", "fooooo", "bar", "bar1", "bar2", "bar33"},
+			want:  []string{"foo", "foo2", "fooooo", "bar1", "bar2"},
+		},
+		{
+			name:  "dir",
+			args:  []string{"foo"},
+			files: []string{"foo/bar", "foo/baz", "foo/bat/baz"},
+			want:  []string{"foo"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			tmpDir, err := ioutil.TempDir("", "")
+			if err != nil {
+				t.Errorf("error creating tmpdir: %s", err)
+			}
+			defer os.RemoveAll(tmpDir)
+
+			for _, f := range tt.files {
+				p := filepath.Join(tmpDir, f)
+				dir := filepath.Dir(p)
+				if dir != "." {
+					if err := os.MkdirAll(dir, 0755); err != nil {
+						t.Errorf("error making dir: %s", err)
+					}
+				}
+				fp, err := os.Create(p)
+				if err != nil {
+					t.Errorf("error making file: %s", err)
+				}
+				fp.Close()
+			}
+
+			args := []string{}
+			for _, arg := range tt.args {
+				args = append(args, filepath.Join(tmpDir, arg))
+			}
+			got, err := filesToSave(args)
+			if err != nil {
+				t.Errorf("got err: %s", err)
+			}
+			want := []string{}
+			for _, w := range tt.want {
+				want = append(want, filepath.Join(tmpDir, w))
+			}
+			sort.Strings(want)
+			sort.Strings(got)
+			if !reflect.DeepEqual(got, want) {
+				t.Errorf("filesToSave() = %v, want %v", got, want)
+			}
+		})
+	}
+}
@@ -47,6 +47,30 @@ func (w *withUserAgent) RoundTrip(r *http.Request) (*http.Response, error) {
 	return w.t.RoundTrip(r)
 }

+// CheckPushPermissionos checks that the configured credentials can be used to
+// push to every specified destination.
+func CheckPushPermissions(opts *config.KanikoOptions) error {
+	if opts.NoPush {
+		return nil
+	}
+
+	checked := map[string]bool{}
+	for _, destination := range opts.Destinations {
+		destRef, err := name.NewTag(destination, name.WeakValidation)
+		if err != nil {
+			return errors.Wrap(err, "getting tag for destination")
+		}
+		if checked[destRef.Context().RepositoryStr()] {
+			continue
+		}
+		if err := remote.CheckPushPermission(destRef, creds.GetKeychain(), http.DefaultTransport); err != nil {
+			return errors.Wrapf(err, "checking push permission for %q", destRef)
+		}
+		checked[destRef.Context().RepositoryStr()] = true
+	}
+	return nil
+}
+
 // DoPush is responsible for pushing image to the destinations specified in opts
 func DoPush(image v1.Image, opts *config.KanikoOptions) error {
 	t := timing.Start("Total Push Time")
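CheckPushPermissions deduplicates by repository so that pushing several tags to one repo validates credentials only once. A stdlib-only sketch of that dedup pattern — `checkRepo` is a stand-in for the real registry round-trip, and the tag stripping here is deliberately naive:

```go
package main

import (
	"fmt"
	"strings"
)

func checkRepo(repo string) error {
	fmt.Println("checking push permission for", repo)
	return nil // a real check would do an authenticated registry request
}

func checkAll(destinations []string) error {
	checked := map[string]bool{}
	for _, d := range destinations {
		// Toy normalization: drop the tag so both tags below map to one repo.
		repo := strings.SplitN(d, ":", 2)[0]
		if checked[repo] {
			continue // already validated this repository
		}
		if err := checkRepo(repo); err != nil {
			return err
		}
		checked[repo] = true
	}
	return nil
}

func main() {
	checkAll([]string{"gcr.io/test/image:v1", "gcr.io/test/image:latest"})
	// checkRepo runs once, not twice
}
```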
@@ -109,11 +109,10 @@ func (l *LayeredMap) Add(s string) error {
 	return nil
 }

-// MaybeAdd will add the specified file s to the layered map if
-// the layered map's hashing function determines it has changed. If
-// it has not changed, it will not be added. Returns true if the file
-// was added.
-func (l *LayeredMap) MaybeAdd(s string) (bool, error) {
+// CheckFileChange checkes whether a given file changed
+// from the current layered map by its hashing function.
+// Returns true if the file is changed.
+func (l *LayeredMap) CheckFileChange(s string) (bool, error) {
 	oldV, ok := l.Get(s)
 	t := timing.Start("Hashing files")
 	defer timing.DefaultRun.Stop(t)
@@ -124,6 +123,5 @@ func (l *LayeredMap) MaybeAdd(s string) (bool, error) {
 	if ok && newV == oldV {
 		return false, nil
 	}
-	l.layers[len(l.layers)-1][s] = newV
 	return true, nil
 }
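The two hunks above split "check" from "record": CheckFileChange only compares hashes and no longer mutates the map, while the caller later calls Add for everything that actually goes into the layer (changed files plus their parent directories). A toy sketch of that check-then-add pattern, with a plain string standing in for the real file hash:

```go
package main

import "fmt"

type layeredMap struct{ layers []map[string]string }

// get looks a path up from the newest layer backwards.
func (l *layeredMap) get(s string) (string, bool) {
	for i := len(l.layers) - 1; i >= 0; i-- {
		if v, ok := l.layers[i][s]; ok {
			return v, true
		}
	}
	return "", false
}

// changed reports whether the hash differs; note: no mutation here.
func (l *layeredMap) changed(s, newHash string) bool {
	old, ok := l.get(s)
	return !ok || old != newHash
}

// add records the hash in the current (topmost) layer.
func (l *layeredMap) add(s, hash string) {
	l.layers[len(l.layers)-1][s] = hash
}

func main() {
	m := &layeredMap{layers: []map[string]string{{"/etc/hosts": "h1"}, {}}}
	if m.changed("/etc/hosts", "h2") {
		m.add("/etc/hosts", "h2") // caller decides when to record
	}
	fmt.Println(m.layers)
}
```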
@@ -73,44 +73,15 @@ func (s *Snapshotter) TakeSnapshot(files []string) (string, error) {
 	}
 	logrus.Info("Taking snapshot of files...")
 	logrus.Debugf("Taking snapshot of files %v", files)
-	snapshottedFiles := make(map[string]bool)
-
-	// First add to the tar any parent directories that haven't been added
-	parentDirs := map[string]struct{}{}
-	for _, file := range files {
-		for _, p := range util.ParentDirectories(file) {
-			parentDirs[p] = struct{}{}
-		}
-	}
-	filesToAdd := []string{}
-	for file := range parentDirs {
-		file = filepath.Clean(file)
-		snapshottedFiles[file] = true
-
-		// The parent directory might already be in a previous layer.
-		fileAdded, err := s.l.MaybeAdd(file)
-		if err != nil {
-			return "", fmt.Errorf("Unable to add parent dir %s to layered map: %s", file, err)
-		}
-
-		if fileAdded {
-			filesToAdd = append(filesToAdd, file)
-		}
-	}
-
-	// Next add the files themselves to the tar
-	for _, file := range files {
-		// We might have already added the file above as a parent directory of another file.
-		file = filepath.Clean(file)
-		if _, ok := snapshottedFiles[file]; ok {
-			continue
-		}
-		snapshottedFiles[file] = true
-
+	// Also add parent directories to keep the permission of them correctly.
+	filesToAdd := filesWithParentDirs(files)
+
+	// Add files to the layered map
+	for _, file := range filesToAdd {
 		if err := s.l.Add(file); err != nil {
 			return "", fmt.Errorf("Unable to add file %s to layered map: %s", file, err)
 		}
-		filesToAdd = append(filesToAdd, file)
 	}

 	t := util.NewTar(f)
@@ -201,16 +172,27 @@ func (s *Snapshotter) scanFullFilesystem() ([]string, []string, error) {
 			logrus.Debugf("Not adding %s to layer, as it's whitelisted", path)
 			continue
 		}
-		// Only add to the tar if we add it to the layeredmap.
-		maybeAdd, err := s.l.MaybeAdd(path)
+		// Only add changed files.
+		fileChanged, err := s.l.CheckFileChange(path)
 		if err != nil {
 			return nil, nil, err
 		}
-		if maybeAdd {
-			logrus.Debugf("Adding %s to layer, because it was changed.", path)
+		if fileChanged {
+			logrus.Infof("Adding %s to layer, because it was changed.", path)
 			filesToAdd = append(filesToAdd, path)
 		}
 	}

+	// Also add parent directories to keep the permission of them correctly.
+	filesToAdd = filesWithParentDirs(filesToAdd)
+
+	// Add files to the layered map
+	for _, file := range filesToAdd {
+		if err := s.l.Add(file); err != nil {
+			return nil, nil, fmt.Errorf("Unable to add file %s to layered map: %s", file, err)
+		}
+	}
+
 	return filesToAdd, filesToWhiteOut, nil
 }
@@ -230,3 +212,24 @@ func writeToTar(t util.Tar, files, whiteouts []string) error {
 	}
 	return nil
 }
+
+func filesWithParentDirs(files []string) []string {
+	filesSet := map[string]bool{}
+
+	for _, file := range files {
+		file = filepath.Clean(file)
+		filesSet[file] = true
+
+		for _, dir := range util.ParentDirectories(file) {
+			dir = filepath.Clean(dir)
+			filesSet[dir] = true
+		}
+	}
+
+	newFiles := []string{}
+	for file := range filesSet {
+		newFiles = append(newFiles, file)
+	}
+
+	return newFiles
+}
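filesWithParentDirs replaces the old two-pass parent-dir bookkeeping with a single set-based dedup. A self-contained stdlib sketch of the same idea — `parentDirectories` below approximates kaniko's util.ParentDirectories for illustration:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// parentDirectories lists every ancestor directory of path, up to the root.
func parentDirectories(path string) []string {
	dirs := []string{}
	for dir := filepath.Dir(path); ; dir = filepath.Dir(dir) {
		dirs = append(dirs, dir)
		if dir == "/" || dir == "." {
			break
		}
	}
	return dirs
}

// filesWithParentDirs collects each file plus all of its ancestors into a
// set, so every directory's permissions are captured exactly once.
func filesWithParentDirs(files []string) []string {
	set := map[string]bool{}
	for _, f := range files {
		f = filepath.Clean(f)
		set[f] = true
		for _, d := range parentDirectories(f) {
			set[filepath.Clean(d)] = true
		}
	}
	out := []string{}
	for f := range set {
		out = append(out, f)
	}
	return out
}

func main() {
	fmt.Println(filesWithParentDirs([]string{"/a/b/c.txt", "/a/d.txt"}))
	// contains /a/b/c.txt, /a/d.txt, /a/b, /a, / (order is map-random)
}
```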
@@ -21,6 +21,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"sort"
 	"testing"

 	"github.com/GoogleContainerTools/kaniko/pkg/util"
@@ -60,6 +61,12 @@ func TestSnapshotFSFileChange(t *testing.T) {
 		fooPath: "newbaz1",
 		batPath: "baz",
 	}
+	for _, dir := range util.ParentDirectories(fooPath) {
+		snapshotFiles[dir] = ""
+	}
+	for _, dir := range util.ParentDirectories(batPath) {
+		snapshotFiles[dir] = ""
+	}
 	numFiles := 0
 	for {
 		hdr, err := tr.Next()
@@ -75,7 +82,7 @@ func TestSnapshotFSFileChange(t *testing.T) {
 			t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, snapshotFiles[hdr.Name], string(contents))
 		}
 	}
-	if numFiles != 2 {
+	if numFiles != len(snapshotFiles) {
 		t.Fatalf("Incorrect number of files were added, expected: 2, actual: %v", numFiles)
 	}
 }
@@ -105,6 +112,9 @@ func TestSnapshotFSChangePermissions(t *testing.T) {
 	snapshotFiles := map[string]string{
 		batPath: "baz2",
 	}
+	for _, dir := range util.ParentDirectories(batPath) {
+		snapshotFiles[dir] = ""
+	}
 	numFiles := 0
 	for {
 		hdr, err := tr.Next()
@@ -120,7 +130,7 @@ func TestSnapshotFSChangePermissions(t *testing.T) {
 			t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, snapshotFiles[hdr.Name], string(contents))
 		}
 	}
-	if numFiles != 1 {
+	if numFiles != len(snapshotFiles) {
 		t.Fatalf("Incorrect number of files were added, expected: 1, got: %v", numFiles)
 	}
 }
@@ -147,7 +157,10 @@ func TestSnapshotFiles(t *testing.T) {
 	}
 	defer os.Remove(tarPath)

-	expectedFiles := []string{"/", "/tmp", filepath.Join(testDir, "foo")}
+	expectedFiles := []string{
+		filepath.Join(testDir, "foo"),
+	}
+	expectedFiles = append(expectedFiles, util.ParentDirectories(filepath.Join(testDir, "foo"))...)

 	f, err := os.Open(tarPath)
 	if err != nil {
@@ -166,6 +179,8 @@ func TestSnapshotFiles(t *testing.T) {
 		}
 		actualFiles = append(actualFiles, hdr.Name)
 	}
+	sort.Strings(expectedFiles)
+	sort.Strings(actualFiles)
 	testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles)
 }
@@ -37,11 +37,13 @@ import (
 func ResolveEnvironmentReplacementList(values, envs []string, isFilepath bool) ([]string, error) {
 	var resolvedValues []string
 	for _, value := range values {
+		var resolved string
+		var err error
 		if IsSrcRemoteFileURL(value) {
-			resolvedValues = append(resolvedValues, value)
-			continue
+			resolved, err = ResolveEnvironmentReplacement(value, envs, false)
+		} else {
+			resolved, err = ResolveEnvironmentReplacement(value, envs, isFilepath)
 		}
-		resolved, err := ResolveEnvironmentReplacement(value, envs, isFilepath)
 		logrus.Debugf("Resolved %s to %s", value, resolved)
 		if err != nil {
 			return nil, err
@@ -76,6 +78,22 @@ func ResolveEnvironmentReplacement(value string, envs []string, isFilepath bool)
 	return fp, nil
 }

+func ResolveEnvAndWildcards(sd instructions.SourcesAndDest, buildcontext string, envs []string) ([]string, string, error) {
+	// First, resolve any environment replacement
+	resolvedEnvs, err := ResolveEnvironmentReplacementList(sd, envs, true)
+	if err != nil {
+		return nil, "", err
+	}
+	dest := resolvedEnvs[len(resolvedEnvs)-1]
+	// Resolve wildcards and get a list of resolved sources
+	srcs, err := ResolveSources(resolvedEnvs[0:len(resolvedEnvs)-1], buildcontext)
+	if err != nil {
+		return nil, "", err
+	}
+	err = IsSrcsValid(sd, srcs, buildcontext)
+	return srcs, dest, err
+}
+
 // ContainsWildcards returns true if any entry in paths contains wildcards
 func ContainsWildcards(paths []string) bool {
 	for _, path := range paths {
@@ -88,23 +106,22 @@ func ContainsWildcards(paths []string) bool {
 
 // ResolveSources resolves the given sources if the sources contains wildcards
 // It returns a list of resolved sources
-func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) ([]string, error) {
-	srcs := srcsAndDest[:len(srcsAndDest)-1]
+func ResolveSources(srcs []string, root string) ([]string, error) {
 	// If sources contain wildcards, we first need to resolve them to actual paths
-	if ContainsWildcards(srcs) {
-		logrus.Debugf("Resolving srcs %v...", srcs)
-		files, err := RelativeFiles("", root)
-		if err != nil {
-			return nil, err
-		}
-		srcs, err = matchSources(srcs, files)
-		if err != nil {
-			return nil, err
-		}
-		logrus.Debugf("Resolved sources to %v", srcs)
+	if !ContainsWildcards(srcs) {
+		return srcs, nil
 	}
-	// Check to make sure the sources are valid
-	return srcs, IsSrcsValid(srcsAndDest, srcs, root)
+	logrus.Infof("Resolving srcs %v...", srcs)
+	files, err := RelativeFiles("", root)
+	if err != nil {
+		return nil, err
+	}
+	resolved, err := matchSources(srcs, files)
+	if err != nil {
+		return nil, err
+	}
+	logrus.Debugf("Resolved sources to %v", resolved)
+	return resolved, nil
 }
 
 // matchSources returns a list of sources that match wildcards
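matchSources itself is outside this hunk; presumably it checks each source pattern against the build-context file list with filepath.Match-style globbing. A rough sketch of that matching step, under that assumption:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        files := []string{"context/foo", "context/bar", "context/baz"}
        for _, pattern := range []string{"context/foo", "context/b*"} {
            for _, f := range files {
                if matched, err := filepath.Match(pattern, f); err == nil && matched {
                    fmt.Println(pattern, "->", f)
                }
            }
        }
    }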
@@ -165,20 +182,24 @@ func DestinationFilepath(src, dest, cwd string) (string, error) {
 }
 
 // URLDestinationFilepath gives the destination a file from a remote URL should be saved to
-func URLDestinationFilepath(rawurl, dest, cwd string) string {
+func URLDestinationFilepath(rawurl, dest, cwd string, envs []string) (string, error) {
 	if !IsDestDir(dest) {
 		if !filepath.IsAbs(dest) {
-			return filepath.Join(cwd, dest)
+			return filepath.Join(cwd, dest), nil
 		}
-		return dest
+		return dest, nil
 	}
 	urlBase := filepath.Base(rawurl)
+	urlBase, err := ResolveEnvironmentReplacement(urlBase, envs, true)
+	if err != nil {
+		return "", err
+	}
 	destPath := filepath.Join(dest, urlBase)
 
 	if !filepath.IsAbs(dest) {
 		destPath = filepath.Join(cwd, destPath)
 	}
-	return destPath
+	return destPath, nil
 }
 
 func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []string, root string) error {
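The new envs parameter means a URL whose basename contains a variable is resolved before the destination path is computed: with envs = {"foo=bar"}, "https://something/$foo.tar.gz" copied into "/foo/" lands at "/foo/bar.tar.gz", exactly the test case added below. The final join step reduces to:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // urlBase after env resolution: "$foo.tar.gz" -> "bar.tar.gz"
        dest, urlBase := "/foo/", "bar.tar.gz"
        fmt.Println(filepath.Join(dest, urlBase)) // /foo/bar.tar.gz
    }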
@@ -17,6 +17,7 @@ limitations under the License.
 package util
 
 import (
+	"reflect"
 	"sort"
 	"testing"
 
@@ -27,14 +28,12 @@ var testURL = "https://github.com/GoogleContainerTools/runtimes-common/blob/mast
 
 var testEnvReplacement = []struct {
 	path         string
-	command      string
 	envs         []string
 	isFilepath   bool
 	expectedPath string
 }{
 	{
 		path: "/simple/path",
-		command: "WORKDIR /simple/path",
 		envs: []string{
 			"simple=/path/",
 		},
@@ -42,8 +41,7 @@ var testEnvReplacement = []struct {
 		expectedPath: "/simple/path",
 	},
 	{
 		path: "/simple/path/",
-		command: "WORKDIR /simple/path/",
 		envs: []string{
 			"simple=/path/",
 		},
@@ -51,8 +49,7 @@ var testEnvReplacement = []struct {
 		expectedPath: "/simple/path/",
 	},
 	{
 		path: "${a}/b",
-		command: "WORKDIR ${a}/b",
 		envs: []string{
 			"a=/path/",
 			"b=/path2/",
@@ -61,8 +58,7 @@ var testEnvReplacement = []struct {
 		expectedPath: "/path/b",
 	},
 	{
 		path: "/$a/b",
-		command: "COPY ${a}/b /c/",
 		envs: []string{
 			"a=/path/",
 			"b=/path2/",
@@ -71,8 +67,7 @@ var testEnvReplacement = []struct {
 		expectedPath: "/path/b",
 	},
 	{
 		path: "/$a/b/",
-		command: "COPY /${a}/b /c/",
 		envs: []string{
 			"a=/path/",
 			"b=/path2/",
@@ -81,8 +76,7 @@ var testEnvReplacement = []struct {
 		expectedPath: "/path/b/",
 	},
 	{
 		path: "\\$foo",
-		command: "COPY \\$foo /quux",
 		envs: []string{
 			"foo=/path/",
 		},
@@ -90,8 +84,14 @@ var testEnvReplacement = []struct {
 		expectedPath: "$foo",
 	},
 	{
 		path: "8080/$protocol",
-		command: "EXPOSE 8080/$protocol",
+		envs: []string{
+			"protocol=udp",
+		},
+		expectedPath: "8080/udp",
+	},
+	{
+		path: "8080/$protocol",
 		envs: []string{
 			"protocol=udp",
 		},
@@ -183,6 +183,7 @@ var urlDestFilepathTests = []struct {
 	cwd          string
 	dest         string
 	expectedDest string
+	envs         []string
 }{
 	{
 		url: "https://something/something",
@@ -202,12 +203,19 @@ var urlDestFilepathTests = []struct {
 		dest:         "/dest/",
 		expectedDest: "/dest/something",
 	},
+	{
+		url:          "https://something/$foo.tar.gz",
+		cwd:          "/test",
+		dest:         "/foo/",
+		expectedDest: "/foo/bar.tar.gz",
+		envs:         []string{"foo=bar"},
+	},
 }
 
 func Test_UrlDestFilepath(t *testing.T) {
 	for _, test := range urlDestFilepathTests {
-		actualDest := URLDestinationFilepath(test.url, test.dest, test.cwd)
-		testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedDest, actualDest)
+		actualDest, err := URLDestinationFilepath(test.url, test.dest, test.cwd, test.envs)
+		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedDest, actualDest)
 	}
 }
 
@@ -400,7 +408,6 @@ var testResolveSources = []struct {
 		"context/foo",
 		"context/b*",
 		testURL,
-		"dest/",
 	},
 	expectedList: []string{
 		"context/foo",
@@ -448,3 +455,57 @@ func Test_RemoteUrls(t *testing.T) {
 	}
 
 }
+
+func TestResolveEnvironmentReplacementList(t *testing.T) {
+	type args struct {
+		values     []string
+		envs       []string
+		isFilepath bool
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    []string
+		wantErr bool
+	}{
+		{
+			name: "url",
+			args: args{
+				values: []string{
+					"https://google.com/$foo", "$bar",
+				},
+				envs: []string{
+					"foo=baz",
+					"bar=bat",
+				},
+			},
+			want: []string{"https://google.com/baz", "bat"},
+		},
+		{
+			name: "mixed",
+			args: args{
+				values: []string{
+					"$foo", "$bar$baz", "baz",
+				},
+				envs: []string{
+					"foo=FOO",
+					"bar=BAR",
+					"baz=BAZ",
+				},
+			},
+			want: []string{"FOO", "BARBAZ", "baz"},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := ResolveEnvironmentReplacementList(tt.args.values, tt.args.envs, tt.args.isFilepath)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("ResolveEnvironmentReplacementList() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("ResolveEnvironmentReplacementList() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
@@ -216,27 +216,16 @@ func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
 		if err != nil {
 			return err
 		}
-		// manually set permissions on file, since the default umask (022) will interfere
-		if err = os.Chmod(path, mode); err != nil {
-			return err
-		}
 		if _, err = io.Copy(currFile, tr); err != nil {
 			return err
 		}
-		if err = currFile.Chown(uid, gid); err != nil {
+		if err = setFilePermissions(path, mode, uid, gid); err != nil {
 			return err
 		}
 		currFile.Close()
 	case tar.TypeDir:
 		logrus.Debugf("creating dir %s", path)
-		if err := os.MkdirAll(path, mode); err != nil {
-			return err
-		}
-		// In some cases, MkdirAll doesn't change the permissions, so run Chmod
-		if err := os.Chmod(path, mode); err != nil {
-			return err
-		}
-		if err := os.Chown(path, uid, gid); err != nil {
+		if err := mkdirAllWithPermissions(path, mode, uid, gid); err != nil {
 			return err
 		}
 
@@ -429,10 +418,7 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode, uid uint32, gid
 	if _, err := io.Copy(dest, reader); err != nil {
 		return err
 	}
-	if err := dest.Chmod(perm); err != nil {
-		return err
-	}
-	return dest.Chown(int(uid), int(gid))
+	return setFilePermissions(path, perm, int(uid), int(gid))
 }
 
 // AddVolumePath adds the given path to the volume whitelist.
@@ -492,13 +478,11 @@ func CopyDir(src, dest, buildcontext string) ([]string, error) {
 		if fi.IsDir() {
 			logrus.Debugf("Creating directory %s", destPath)
 
+			mode := fi.Mode()
 			uid := int(fi.Sys().(*syscall.Stat_t).Uid)
 			gid := int(fi.Sys().(*syscall.Stat_t).Gid)
 
-			if err := os.MkdirAll(destPath, fi.Mode()); err != nil {
-				return nil, err
-			}
-			if err := os.Chown(destPath, uid, gid); err != nil {
+			if err := mkdirAllWithPermissions(destPath, mode, uid, gid); err != nil {
 				return nil, err
 			}
 		} else if fi.Mode()&os.ModeSymlink != 0 {
@@ -614,3 +598,24 @@ func HasFilepathPrefix(path, prefix string, prefixMatchOnly bool) bool {
 func Volumes() []string {
 	return volumes
 }
+
+func mkdirAllWithPermissions(path string, mode os.FileMode, uid, gid int) error {
+	if err := os.MkdirAll(path, mode); err != nil {
+		return err
+	}
+	if err := os.Chown(path, uid, gid); err != nil {
+		return err
+	}
+	// In some cases, MkdirAll doesn't change the permissions, so run Chmod
+	// Must chmod after chown because chown resets the file mode.
+	return os.Chmod(path, mode)
+}
+
+func setFilePermissions(path string, mode os.FileMode, uid, gid int) error {
+	if err := os.Chown(path, uid, gid); err != nil {
+		return err
+	}
+	// manually set permissions on file, since the default umask (022) will interfere
+	// Must chmod after chown because chown resets the file mode.
+	return os.Chmod(path, mode)
+}
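The ordering in both helpers matters: on Linux, an unprivileged chown(2) clears the setuid and setgid bits, so running Chmod last is what lets the extracted setuid test file below keep mode 04644. A runnable sketch of the pitfall (writes to a temp file; the exact bit-clearing behavior is kernel- and privilege-dependent):

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"
        "os"
    )

    func main() {
        f, err := ioutil.TempFile("", "perm-demo")
        if err != nil {
            log.Fatal(err)
        }
        defer os.Remove(f.Name())
        f.Close()

        // Chmod first, then Chown: the kernel may drop setuid on ownership change.
        os.Chmod(f.Name(), 0644|os.ModeSetuid)
        os.Chown(f.Name(), os.Getuid(), os.Getgid())
        fi, _ := os.Stat(f.Name())
        fmt.Println(fi.Mode()) // setuid bit may already be gone here

        // Chown first, then Chmod, as mkdirAllWithPermissions/setFilePermissions do:
        os.Chown(f.Name(), os.Getuid(), os.Getgid())
        os.Chmod(f.Name(), 0644|os.ModeSetuid)
        fi, _ = os.Stat(f.Name())
        fmt.Println(fi.Mode()) // setuid preserved
    }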
@@ -503,6 +503,30 @@ func TestExtractFile(t *testing.T) {
 				filesAreHardlinks("/bin/uncompress", "/bin/gzip"),
 			},
 		},
+		{
+			name:     "file with setuid bit",
+			contents: []byte("helloworld"),
+			hdrs:     []*tar.Header{fileHeader("./bar", "helloworld", 04644)},
+			checkers: []checker{
+				fileExists("/bar"),
+				fileMatches("/bar", []byte("helloworld")),
+				permissionsMatch("/bar", 0644|os.ModeSetuid),
+			},
+		},
+		{
+			name:     "dir with sticky bit",
+			contents: []byte("helloworld"),
+			hdrs: []*tar.Header{
+				dirHeader("./foo", 01755),
+				fileHeader("./foo/bar", "helloworld", 0644),
+			},
+			checkers: []checker{
+				fileExists("/foo/bar"),
+				fileMatches("/foo/bar", []byte("helloworld")),
+				permissionsMatch("/foo/bar", 0644),
+				permissionsMatch("/foo", 0755|os.ModeDir|os.ModeSticky),
+			},
+		},
 	}
 
 	for _, tc := range tcs {
@@ -72,7 +72,7 @@ func (h *helper) Authorization() (string, error) {
 
 	var out bytes.Buffer
 	cmd.Stdout = &out
-	err := h.r.Run(cmd)
+	cmdErr := h.r.Run(cmd)
 
 	// If we see this specific message, it means the domain wasn't found
 	// and we should fall back on anonymous auth.
@@ -81,16 +81,22 @@ func (h *helper) Authorization() (string, error) {
 		return Anonymous.Authorization()
 	}
 
-	if err != nil {
-		return "", err
-	}
-
 	// Any other output should be parsed as JSON and the Username / Secret
 	// fields used for Basic authentication.
 	ho := helperOutput{}
 	if err := json.Unmarshal([]byte(output), &ho); err != nil {
+		if cmdErr != nil {
+			// If we failed to parse output, it won't contain Secret, so returning it
+			// in an error should be fine.
+			return "", fmt.Errorf("invoking %s: %v; output: %s", helperName, cmdErr, output)
+		}
 		return "", err
 	}
 
+	if cmdErr != nil {
+		return "", fmt.Errorf("invoking %s: %v", helperName, cmdErr)
+	}
+
 	b := Basic{Username: ho.Username, Password: ho.Secret}
 	return b.Authorization()
 }
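The helper's stdout follows the docker credential-helper protocol: a JSON object whose Username and Secret fields feed Basic auth. A minimal sketch of the parsing step, with illustrative values (the real helperOutput type lives elsewhere in this package):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type helperOutput struct {
        Username string
        Secret   string
    }

    func main() {
        // Illustrative output of a `docker-credential-* get` invocation.
        stdout := `{"Username":"oauth2accesstoken","Secret":"<token>"}`
        ho := helperOutput{}
        if err := json.Unmarshal([]byte(stdout), &ho); err != nil {
            panic(err)
        }
        fmt.Println(ho.Username, len(ho.Secret) > 0)
    }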
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// package k8schain exposes an implementation of the authn.Keychain interface
+// Package k8schain exposes an implementation of the authn.Keychain interface
 // based on the semantics the Kubelet follows when pulling the images for a
 // Pod in Kubernetes.
 package k8schain

@@ -17,7 +17,7 @@ package k8schain
 import (
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/name"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"

@@ -73,14 +73,14 @@ func NewDigest(name string, strict Strictness) (Digest, error) {
 	base := parts[0]
 	digest := parts[1]
 
-	// We don't require a digest, but if we get one check it's valid,
-	// even when not being strict.
-	// If we are being strict, we want to validate the digest regardless in case
-	// it's empty.
-	if digest != "" || strict == StrictValidation {
-		if err := checkDigest(digest); err != nil {
-			return Digest{}, err
-		}
+	// Always check that the digest is valid.
+	if err := checkDigest(digest); err != nil {
+		return Digest{}, err
+	}
+
+	tag, err := NewTag(base, strict)
+	if err == nil {
+		base = tag.Repository.Name()
 	}
 
 	repo, err := NewRepository(base, strict)

@@ -15,12 +15,14 @@
 package name
 
 import (
+	"net"
 	"net/url"
 	"regexp"
 	"strings"
 )
 
 const (
+	// DefaultRegistry is Docker Hub, assumed when a hostname is omitted.
 	DefaultRegistry      = "index.docker.io"
 	defaultRegistryAlias = "docker.io"
 )

@@ -63,11 +65,29 @@ func (r Registry) Scope(string) string {
 	return "registry:catalog:*"
 }
 
+func (r Registry) isRFC1918() bool {
+	ipStr := strings.Split(r.Name(), ":")[0]
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		return false
+	}
+	for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
+		_, block, _ := net.ParseCIDR(cidr)
+		if block.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}
+
 // Scheme returns https scheme for all the endpoints except localhost or when explicitly defined.
 func (r Registry) Scheme() string {
 	if r.insecure {
 		return "http"
 	}
+	if r.isRFC1918() {
+		return "http"
+	}
 	if strings.HasPrefix(r.Name(), "localhost:") {
 		return "http"
 	}
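The effect of isRFC1918 is that registries addressed by a private IPv4 address (10/8, 172.16/12, 192.168/16) are talked to over plain HTTP, like localhost, while everything else stays on HTTPS. The check is easy to exercise in isolation:

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    func isRFC1918(name string) bool {
        ip := net.ParseIP(strings.Split(name, ":")[0])
        if ip == nil {
            return false
        }
        for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
            _, block, _ := net.ParseCIDR(cidr)
            if block.Contains(ip) {
                return true
            }
        }
        return false
    }

    func main() {
        for _, r := range []string{"10.0.0.1:5000", "gcr.io", "192.168.1.10"} {
            fmt.Println(r, isRFC1918(r)) // true -> http, false -> https (unless localhost)
        }
    }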
@@ -21,27 +21,28 @@ import (
 )
 
 // ConfigFile is the configuration file that holds the metadata describing
-// how to launch a container. The names of the fields are chosen to reflect
-// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY
+// how to launch a container. See:
+// https://github.com/opencontainers/image-spec/blob/master/config.md
 type ConfigFile struct {
 	Architecture    string    `json:"architecture"`
-	Container       string    `json:"container"`
-	Created         Time      `json:"created"`
-	DockerVersion   string    `json:"docker_version"`
-	History         []History `json:"history"`
+	Author          string    `json:"author,omitempty"`
+	Container       string    `json:"container,omitempty"`
+	Created         Time      `json:"created,omitempty"`
+	DockerVersion   string    `json:"docker_version,omitempty"`
+	History         []History `json:"history,omitempty"`
 	OS              string    `json:"os"`
 	RootFS          RootFS    `json:"rootfs"`
 	Config          Config    `json:"config"`
-	ContainerConfig Config    `json:"container_config"`
-	OSVersion       string    `json:"osversion"`
+	ContainerConfig Config    `json:"container_config,omitempty"`
+	OSVersion       string    `json:"osversion,omitempty"`
 }
 
 // History is one entry of a list recording how this container image was built.
 type History struct {
-	Author     string `json:"author"`
-	Created    Time   `json:"created"`
-	CreatedBy  string `json:"created_by"`
-	Comment    string `json:"comment"`
+	Author     string `json:"author,omitempty"`
+	Created    Time   `json:"created,omitempty"`
+	CreatedBy  string `json:"created_by,omitempty"`
+	Comment    string `json:"comment,omitempty"`
 	EmptyLayer bool   `json:"empty_layer,omitempty"`
 }

@@ -20,11 +20,10 @@ import (
 	"io"
 	"io/ioutil"
 
-	"github.com/google/go-containerregistry/pkg/v1/tarball"
-
 	"github.com/docker/docker/client"
 	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
 )
 
 // image accesses an image from a docker daemon

@@ -42,6 +41,7 @@ type imageOpener struct {
 	buffered bool
 }
 
+// ImageOption is a functional option for Image.
 type ImageOption func(*imageOpener) error
 
 func (i *imageOpener) Open() (v1.Image, error) {

@@ -66,7 +66,7 @@ func (i *imageOpener) Open() (v1.Image, error) {
 	return img, nil
 }
 
-// API interface for testing.
+// ImageSaver is an interface for testing.
 type ImageSaver interface {
 	ImageSave(context.Context, []string) (io.ReadCloser, error)
 }

@@ -14,12 +14,14 @@
 
 package daemon
 
+// WithBufferedOpener buffers the image.
 func WithBufferedOpener() ImageOption {
 	return func(i *imageOpener) error {
 		return i.setBuffered(true)
 	}
 }
 
+// WithUnbufferedOpener streams the image to avoid buffering.
 func WithUnbufferedOpener() ImageOption {
 	return func(i *imageOpener) error {
 		return i.setBuffered(false)

@@ -19,22 +19,21 @@ import (
 	"io"
 	"io/ioutil"
 
-	"github.com/pkg/errors"
-
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 
 	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
+	"github.com/pkg/errors"
 )
 
-// API interface for testing.
+// ImageLoader is an interface for testing.
 type ImageLoader interface {
 	ImageLoad(context.Context, io.Reader, bool) (types.ImageLoadResponse, error)
+	ImageTag(context.Context, string, string) error
 }
 
-// This is a variable so we can override in tests.
+// GetImageLoader is a variable so we can override in tests.
 var GetImageLoader = func() (ImageLoader, error) {
 	cli, err := client.NewEnvClient()
 	if err != nil {

@@ -44,6 +43,16 @@ var GetImageLoader = func() (ImageLoader, error) {
 	return cli, nil
 }
 
+// Tag adds a tag to an already existent image.
+func Tag(src, dest name.Tag) error {
+	cli, err := GetImageLoader()
+	if err != nil {
+		return err
+	}
+
+	return cli.ImageTag(context.Background(), src.String(), dest.String())
+}
+
 // Write saves the image into the daemon as the given tag.
 func Write(tag name.Tag, img v1.Image) (string, error) {
 	cli, err := GetImageLoader()
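A hypothetical caller of the new Tag helper, assuming go-containerregistry at this revision and a reachable Docker daemon (the image names are placeholders):

    package main

    import (
        "log"

        "github.com/google/go-containerregistry/pkg/name"
        "github.com/google/go-containerregistry/pkg/v1/daemon"
    )

    func main() {
        src, err := name.NewTag("gcr.io/my-project/app:latest", name.WeakValidation)
        if err != nil {
            log.Fatal(err)
        }
        dest, err := name.NewTag("gcr.io/my-project/app:v1", name.WeakValidation)
        if err != nil {
            log.Fatal(err)
        }
        // Equivalent to `docker tag src dest` against the local daemon.
        if err := daemon.Tag(src, dest); err != nil {
            log.Fatal(err)
        }
    }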
@@ -12,8 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package v1 defines structured types for OCI v1 images
+//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
 // +k8s:deepcopy-gen=package
-//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i .
+// Package v1 defines structured types for OCI v1 images
 package v1

vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go (new file, 59 lines, generated, vendored)
@@ -0,0 +1,59 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package empty
+
+import (
+	"encoding/json"
+	"errors"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// Index is a singleton empty index, think: FROM scratch.
+var Index = emptyIndex{}
+
+type emptyIndex struct{}
+
+func (i emptyIndex) MediaType() (types.MediaType, error) {
+	return types.OCIImageIndex, nil
+}
+
+func (i emptyIndex) Digest() (v1.Hash, error) {
+	return partial.Digest(i)
+}
+
+func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) {
+	return &v1.IndexManifest{
+		SchemaVersion: 2,
+	}, nil
+}
+
+func (i emptyIndex) RawManifest() ([]byte, error) {
+	im, err := i.IndexManifest()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(im)
+}
+
+func (i emptyIndex) Image(v1.Hash) (v1.Image, error) {
+	return nil, errors.New("empty index")
+}
+
+func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) {
+	return nil, errors.New("empty index")
+}

@@ -49,7 +49,7 @@ func NewHash(s string) (Hash, error) {
 }
 
 // MarshalJSON implements json.Marshaler
-func (h *Hash) MarshalJSON() ([]byte, error) {
+func (h Hash) MarshalJSON() ([]byte, error) {
 	return json.Marshal(h.String())
 }
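Moving MarshalJSON from *Hash to Hash matters because encoding/json only finds a pointer-receiver marshaler when the value is addressable; a Hash stored by value in a struct being marshaled would otherwise fall back to default struct encoding. A self-contained illustration of the gotcha:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type Hash struct{ Hex string }

    // Value receiver: in the method set of both Hash and *Hash, so this is
    // used even when a Hash field is marshaled inside another value.
    func (h Hash) MarshalJSON() ([]byte, error) {
        return json.Marshal("sha256:" + h.Hex)
    }

    type Descriptor struct {
        Digest Hash `json:"digest"`
    }

    func main() {
        b, _ := json.Marshal(Descriptor{Digest: Hash{Hex: "abc123"}})
        fmt.Println(string(b)) // {"digest":"sha256:abc123"}
    }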
@@ -24,9 +24,6 @@ type Image interface {
 	// The order of the list is oldest/base layer first, and most-recent/top layer last.
 	Layers() ([]Layer, error)
 
-	// BlobSet returns an unordered collection of all the blobs in the image.
-	BlobSet() (map[Hash]struct{}, error)
-
 	// MediaType of this image's manifest.
 	MediaType() (types.MediaType, error)

@@ -18,6 +18,7 @@ import (
 	"github.com/google/go-containerregistry/pkg/v1/types"
 )
 
+// ImageIndex defines the interface for interacting with an OCI image index.
 type ImageIndex interface {
 	// MediaType of this image's manifest.
 	MediaType() (types.MediaType, error)

@@ -28,6 +29,12 @@ type ImageIndex interface {
 	// IndexManifest returns this image index's manifest object.
 	IndexManifest() (*IndexManifest, error)
 
-	// RawIndexManifest returns the serialized bytes of IndexManifest().
-	RawIndexManifest() ([]byte, error)
+	// RawManifest returns the serialized bytes of IndexManifest().
+	RawManifest() ([]byte, error)
+
+	// Image returns a v1.Image that this ImageIndex references.
+	Image(Hash) (Image, error)
+
+	// ImageIndex returns a v1.ImageIndex that this ImageIndex references.
+	ImageIndex(Hash) (ImageIndex, error)
 }

@@ -23,7 +23,7 @@ import (
 
 // Manifest represents the OCI image manifest in a structured way.
 type Manifest struct {
-	SchemaVersion int64           `json:"schemaVersion"`
+	SchemaVersion int64           `json:"schemaVersion,omitempty"`
 	MediaType     types.MediaType `json:"mediaType"`
 	Config        Descriptor      `json:"config"`
 	Layers        []Descriptor    `json:"layers"`

@@ -26,9 +26,10 @@ import (
 	"strings"
 	"time"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/empty"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
 	"github.com/google/go-containerregistry/pkg/v1/types"
 	"github.com/google/go-containerregistry/pkg/v1/v1util"

@@ -58,77 +59,14 @@ func Append(base v1.Image, adds ...Addendum) (v1.Image, error) {
 	if len(adds) == 0 {
 		return base, nil
 	}
 
 	if err := validate(adds); err != nil {
 		return nil, err
 	}
 
-	m, err := base.Manifest()
-	if err != nil {
-		return nil, err
-	}
-
-	cf, err := base.ConfigFile()
-	if err != nil {
-		return nil, err
-	}
-
-	image := &image{
-		Image:      base,
-		manifest:   m.DeepCopy(),
-		configFile: cf.DeepCopy(),
-		diffIDMap:  make(map[v1.Hash]v1.Layer),
-		digestMap:  make(map[v1.Hash]v1.Layer),
-	}
-
-	diffIDs := image.configFile.RootFS.DiffIDs
-	history := image.configFile.History
-
-	for _, add := range adds {
-		diffID, err := add.Layer.DiffID()
-		if err != nil {
-			return nil, err
-		}
-		diffIDs = append(diffIDs, diffID)
-		history = append(history, add.History)
-		image.diffIDMap[diffID] = add.Layer
-	}
-
-	manifestLayers := image.manifest.Layers
-
-	for _, add := range adds {
-		d := v1.Descriptor{
-			MediaType: types.DockerLayer,
-		}
-
-		if d.Size, err = add.Layer.Size(); err != nil {
-			return nil, err
-		}
-
-		if d.Digest, err = add.Layer.Digest(); err != nil {
-			return nil, err
-		}
-
-		manifestLayers = append(manifestLayers, d)
-		image.digestMap[d.Digest] = add.Layer
-	}
-
-	image.configFile.RootFS.DiffIDs = diffIDs
-	image.configFile.History = history
-	image.manifest.Layers = manifestLayers
-
-	rcfg, err := image.RawConfigFile()
-	if err != nil {
-		return nil, err
-	}
-	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
-	if err != nil {
-		return nil, err
-	}
-	image.manifest.Config.Digest = d
-	image.manifest.Config.Size = sz
-
-	return image, nil
+	return &image{
+		base: base,
+		adds: adds,
+	}, nil
 }
 
 // Config mutates the provided v1.Image to have the provided v1.Config

@@ -150,22 +88,11 @@ func configFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) {
 	}
 
 	image := &image{
-		Image:      base,
+		base:       base,
 		manifest:   m.DeepCopy(),
 		configFile: cfg,
-		digestMap:  make(map[v1.Hash]v1.Layer),
 	}
 
-	rcfg, err := image.RawConfigFile()
-	if err != nil {
-		return nil, err
-	}
-	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
-	if err != nil {
-		return nil, err
-	}
-	image.manifest.Config.Digest = d
-	image.manifest.Config.Size = sz
 	return image, nil
 }

@@ -183,16 +110,118 @@ func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) {
 }
 
 type image struct {
-	v1.Image
+	base v1.Image
+	adds []Addendum
+
+	computed   bool
 	configFile *v1.ConfigFile
 	manifest   *v1.Manifest
 	diffIDMap  map[v1.Hash]v1.Layer
 	digestMap  map[v1.Hash]v1.Layer
 }
 
+var _ v1.Image = (*image)(nil)
+
+func (i *image) MediaType() (types.MediaType, error) { return i.base.MediaType() }
+
+func (i *image) compute() error {
+	// Don't re-compute if already computed.
+	if i.computed {
+		return nil
+	}
+	var configFile *v1.ConfigFile
+	if i.configFile != nil {
+		configFile = i.configFile
+	} else {
+		cf, err := i.base.ConfigFile()
+		if err != nil {
+			return err
+		}
+		configFile = cf.DeepCopy()
+	}
+	diffIDs := configFile.RootFS.DiffIDs
+	history := configFile.History
+
+	diffIDMap := make(map[v1.Hash]v1.Layer)
+	digestMap := make(map[v1.Hash]v1.Layer)
+
+	for _, add := range i.adds {
+		diffID, err := add.Layer.DiffID()
+		if err != nil {
+			return err
+		}
+		diffIDs = append(diffIDs, diffID)
+		history = append(history, add.History)
+		diffIDMap[diffID] = add.Layer
+	}
+
+	m, err := i.base.Manifest()
+	if err != nil {
+		return err
+	}
+	manifest := m.DeepCopy()
+	manifestLayers := manifest.Layers
+	for _, add := range i.adds {
+		d := v1.Descriptor{
+			MediaType: types.DockerLayer,
+		}
+
+		var err error
+		if d.Size, err = add.Layer.Size(); err != nil {
+			return err
+		}
+
+		if d.Digest, err = add.Layer.Digest(); err != nil {
+			return err
+		}
+
+		manifestLayers = append(manifestLayers, d)
+		digestMap[d.Digest] = add.Layer
+	}
+
+	configFile.RootFS.DiffIDs = diffIDs
+	configFile.History = history
+
+	manifest.Layers = manifestLayers
+
+	rcfg, err := json.Marshal(configFile)
+	if err != nil {
+		return err
+	}
+	d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg))
+	if err != nil {
+		return err
+	}
+	manifest.Config.Digest = d
+	manifest.Config.Size = sz
+
+	i.configFile = configFile
+	i.manifest = manifest
+	i.diffIDMap = diffIDMap
+	i.digestMap = digestMap
+	i.computed = true
+	return nil
+}
+
 // Layers returns the ordered collection of filesystem layers that comprise this image.
 // The order of the list is oldest/base layer first, and most-recent/top layer last.
 func (i *image) Layers() ([]v1.Layer, error) {
+	if err := i.compute(); err == stream.ErrNotComputed {
+		// Image contains a streamable layer which has not yet been
+		// consumed. Just return the layers we have in case the caller
+		// is going to consume the layers.
+		layers, err := i.base.Layers()
+		if err != nil {
+			return nil, err
+		}
+		for _, add := range i.adds {
+			layers = append(layers, add.Layer)
+		}
+		return layers, nil
+	} else if err != nil {
+		return nil, err
+	}
+
 	diffIDs, err := partial.DiffIDs(i)
 	if err != nil {
 		return nil, err
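The rewrite above turns mutate's image into a lazily-computed view: Append now just records base and adds, and compute() materializes the manifest, config, and lookup maps on first use, bailing out with stream.ErrNotComputed while a streaming layer's digest is still unknown. The memoization skeleton, reduced to its essentials:

    package main

    import "fmt"

    type lazyImage struct {
        adds     []string
        computed bool
        manifest []string
    }

    // compute is idempotent: every accessor calls it, only the first pays.
    func (i *lazyImage) compute() error {
        if i.computed {
            return nil
        }
        i.manifest = append([]string{"base"}, i.adds...)
        i.computed = true
        return nil
    }

    func (i *lazyImage) Manifest() ([]string, error) {
        if err := i.compute(); err != nil {
            return nil, err
        }
        return i.manifest, nil
    }

    func main() {
        img := &lazyImage{adds: []string{"layer1", "layer2"}}
        m, _ := img.Manifest()
        fmt.Println(m) // [base layer1 layer2]
    }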
@@ -208,38 +237,51 @@ func (i *image) Layers() ([]v1.Layer, error) {
 	return ls, nil
 }
 
-// BlobSet returns an unordered collection of all the blobs in the image.
-func (i *image) BlobSet() (map[v1.Hash]struct{}, error) {
-	return partial.BlobSet(i)
-}
-
 // ConfigName returns the hash of the image's config file.
 func (i *image) ConfigName() (v1.Hash, error) {
+	if err := i.compute(); err != nil {
+		return v1.Hash{}, err
+	}
 	return partial.ConfigName(i)
 }
 
 // ConfigFile returns this image's config file.
 func (i *image) ConfigFile() (*v1.ConfigFile, error) {
+	if err := i.compute(); err != nil {
+		return nil, err
+	}
 	return i.configFile, nil
 }
 
 // RawConfigFile returns the serialized bytes of ConfigFile()
 func (i *image) RawConfigFile() ([]byte, error) {
+	if err := i.compute(); err != nil {
+		return nil, err
+	}
 	return json.Marshal(i.configFile)
 }
 
 // Digest returns the sha256 of this image's manifest.
 func (i *image) Digest() (v1.Hash, error) {
+	if err := i.compute(); err != nil {
+		return v1.Hash{}, err
+	}
 	return partial.Digest(i)
 }
 
 // Manifest returns this image's Manifest object.
 func (i *image) Manifest() (*v1.Manifest, error) {
+	if err := i.compute(); err != nil {
+		return nil, err
+	}
 	return i.manifest, nil
 }
 
 // RawManifest returns the serialized bytes of Manifest()
 func (i *image) RawManifest() ([]byte, error) {
+	if err := i.compute(); err != nil {
+		return nil, err
+	}
 	return json.Marshal(i.manifest)
 }

@@ -254,7 +296,7 @@ func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) {
 	if layer, ok := i.digestMap[h]; ok {
 		return layer, nil
 	}
-	return i.Image.LayerByDigest(h)
+	return i.base.LayerByDigest(h)
 }
 
 // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id"

@@ -263,7 +305,7 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
 	if layer, ok := i.diffIDMap[h]; ok {
 		return layer, nil
 	}
-	return i.Image.LayerByDiffID(h)
+	return i.base.LayerByDiffID(h)
 }
 
 func validate(adds []Addendum) error {

@@ -468,6 +510,10 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
 		}
 	}
 
+	if err := tarWriter.Close(); err != nil {
+		return nil, err
+	}
+
 	b := w.Bytes()
 	// gzip the contents, then create the layer
 	opener := func() (io.ReadCloser, error) {

@@ -17,10 +17,11 @@ package mutate
 import (
 	"fmt"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/empty"
 )
 
+// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase.
 func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) {
 	// Verify that oldBase's layers are present in orig, otherwise orig is
 	// not based on oldBase at all.

@@ -17,7 +17,7 @@ package partial
 import (
 	"io"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )

@@ -91,11 +91,6 @@ type compressedImageExtender struct {
 // Assert that our extender type completes the v1.Image interface
 var _ v1.Image = (*compressedImageExtender)(nil)
 
-// BlobSet implements v1.Image
-func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) {
-	return BlobSet(i)
-}
-
 // Digest implements v1.Image
 func (i *compressedImageExtender) Digest() (v1.Hash, error) {
 	return Digest(i)

@@ -125,11 +120,6 @@ func (i *compressedImageExtender) Layers() ([]v1.Layer, error) {
 
 // LayerByDigest implements v1.Image
 func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
-	if cfgName, err := i.ConfigName(); err != nil {
-		return nil, err
-	} else if cfgName == h {
-		return ConfigLayer(i)
-	}
 	cl, err := i.CompressedImageCore.LayerByDigest(h)
 	if err != nil {
 		return nil, err

@@ -19,7 +19,7 @@ import (
 	"io"
 	"sync"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/types"
 	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )

@@ -112,11 +112,6 @@ type uncompressedImageExtender struct {
 // Assert that our extender type completes the v1.Image interface
 var _ v1.Image = (*uncompressedImageExtender)(nil)
 
-// BlobSet implements v1.Image
-func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) {
-	return BlobSet(i)
-}
-
 // Digest implements v1.Image
 func (i *uncompressedImageExtender) Digest() (v1.Hash, error) {
 	return Digest(i)

@@ -220,13 +215,6 @@ func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, err
 
 // LayerByDigest implements v1.Image
 func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) {
-	// Support returning the ConfigFile when asked for its hash.
-	if cfgName, err := i.ConfigName(); err != nil {
-		return nil, err
-	} else if cfgName == h {
-		return ConfigLayer(i)
-	}
-
 	diffID, err := BlobToDiffID(i, h)
 	if err != nil {
 		return nil, err

@@ -19,8 +19,9 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )

@@ -49,8 +50,6 @@ func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
 	return h, err
 }
 
-// configLayer implements v1.Layer from the raw config bytes.
-// This is so that clients (e.g. remote) can access the config as a blob.
 type configLayer struct {
 	hash    v1.Hash
 	content []byte

@@ -68,12 +67,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) {
 
 // Uncompressed implements v1.Layer
 func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
-	return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
+	return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
 }
 
 // Compressed implements v1.Layer
 func (cl *configLayer) Compressed() (io.ReadCloser, error) {
-	return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil
+	return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
 }
 
 // Size implements v1.Layer

@@ -83,6 +82,8 @@ func (cl *configLayer) Size() (int64, error) {
 
 var _ v1.Layer = (*configLayer)(nil)
 
+// ConfigLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
 func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
 	h, err := ConfigName(i)
 	if err != nil {
@@ -190,20 +191,6 @@ func FSLayers(i WithManifest) ([]v1.Hash, error) {
 	return fsl, nil
 }
 
-// BlobSet is a helper for implementing v1.Image
-func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) {
-	m, err := i.Manifest()
-	if err != nil {
-		return nil, err
-	}
-	bs := make(map[v1.Hash]struct{})
-	for _, l := range m.Layers {
-		bs[l.Digest] = struct{}{}
-	}
-	bs[m.Config.Digest] = struct{}{}
-	return bs, nil
-}
-
 // BlobSize is a helper for implementing v1.Image
 func BlobSize(i WithManifest, h v1.Hash) (int64, error) {
 	m, err := i.Manifest()

@@ -20,12 +20,12 @@ import (
 	"crypto/rand"
 	"fmt"
 	"io"
+	"io/ioutil"
 	"time"
 
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
 	"github.com/google/go-containerregistry/pkg/v1/types"
-	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
 // uncompressedLayer implements partial.UncompressedLayer from raw bytes.

@@ -42,7 +42,7 @@ func (ul *uncompressedLayer) DiffID() (v1.Hash, error) {
 
 // Uncompressed implements partial.UncompressedLayer
 func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) {
-	return v1util.NopReadCloser(bytes.NewBuffer(ul.content)), nil
+	return ioutil.NopCloser(bytes.NewBuffer(ul.content)), nil
 }
 
 var _ partial.UncompressedLayer = (*uncompressedLayer)(nil)

@@ -54,14 +54,18 @@ func Image(byteSize, layers int64) (v1.Image, error) {
 		var b bytes.Buffer
 		tw := tar.NewWriter(&b)
 		if err := tw.WriteHeader(&tar.Header{
 			Name: fmt.Sprintf("random_file_%d.txt", i),
 			Size: byteSize,
+			Typeflag: tar.TypeRegA,
 		}); err != nil {
 			return nil, err
 		}
 		if _, err := io.CopyN(tw, rand.Reader, byteSize); err != nil {
 			return nil, err
 		}
+		if err := tw.Close(); err != nil {
+			return nil, err
+		}
 		bts := b.Bytes()
 		h, _, err := v1.SHA256(bytes.NewReader(bts))
 		if err != nil {

@@ -75,6 +79,9 @@ func Image(byteSize, layers int64) (v1.Image, error) {
 
 	cfg := &v1.ConfigFile{}
 
+	// Some clients check this.
+	cfg.RootFS.Type = "layers"
+
 	// It is ok that iteration order is random in Go, because this is the random image anyways.
 	for k := range layerz {
 		cfg.RootFS.DiffIDs = append(cfg.RootFS.DiffIDs, k)
vendor/github.com/google/go-containerregistry/pkg/v1/random/index.go (generated, vendored, new file, 106 lines)
@@ -0,0 +1,106 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package random
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+type randomIndex struct {
+	images   map[v1.Hash]v1.Image
+	manifest *v1.IndexManifest
+}
+
+// Index returns a pseudo-randomly generated ImageIndex with count images, each
+// having the given number of layers of size byteSize.
+func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
+	manifest := v1.IndexManifest{
+		SchemaVersion: 2,
+		Manifests:     []v1.Descriptor{},
+	}
+
+	images := make(map[v1.Hash]v1.Image)
+	for i := int64(0); i < count; i++ {
+		img, err := Image(byteSize, layers)
+		if err != nil {
+			return nil, err
+		}
+
+		rawManifest, err := img.RawManifest()
+		if err != nil {
+			return nil, err
+		}
+		digest, size, err := v1.SHA256(bytes.NewReader(rawManifest))
+		if err != nil {
+			return nil, err
+		}
+		mediaType, err := img.MediaType()
+		if err != nil {
+			return nil, err
+		}
+
+		manifest.Manifests = append(manifest.Manifests, v1.Descriptor{
+			Digest:    digest,
+			Size:      size,
+			MediaType: mediaType,
+		})
+
+		images[digest] = img
+	}
+
+	return &randomIndex{
+		images:   images,
+		manifest: &manifest,
+	}, nil
+}
+
+func (i *randomIndex) MediaType() (types.MediaType, error) {
+	return types.OCIImageIndex, nil
+}
+
+func (i *randomIndex) Digest() (v1.Hash, error) {
+	return partial.Digest(i)
+}
+
+func (i *randomIndex) IndexManifest() (*v1.IndexManifest, error) {
+	return i.manifest, nil
+}
+
+func (i *randomIndex) RawManifest() ([]byte, error) {
+	m, err := i.IndexManifest()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(m)
+}
+
+func (i *randomIndex) Image(h v1.Hash) (v1.Image, error) {
+	if img, ok := i.images[h]; ok {
+		return img, nil
+	}
+
+	return nil, fmt.Errorf("image not found: %v", h)
+}
+
+func (i *randomIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
+	// This is a single level index (for now?).
+	return nil, fmt.Errorf("image not found: %v", h)
+}
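For orientation (not part of this diff): the new random.Index constructor pairs with the existing random.Image helper, so tests can fabricate a multi-image index entirely in memory. A minimal sketch, with all sizes chosen arbitrarily:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	// Three pseudo-random images, each with two 1 KiB layers.
	idx, err := random.Index(1024, 2, 3)
	if err != nil {
		log.Fatal(err)
	}
	m, err := idx.IndexManifest()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("manifests in index:", len(m.Manifests)) // 3
}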
vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go (generated, vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
+package remote
+
+import (
+	"net/http"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+// CheckPushPermission returns an error if the given keychain cannot authorize
+// a push operation to the given ref.
+//
+// This can be useful to check whether the caller has permission to push an
+// image before doing work to construct the image.
+//
+// TODO(#412): Remove the need for this method.
+func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error {
+	auth, err := kc.Resolve(ref.Context().Registry)
+	if err != nil {
+		return err
+	}
+
+	scopes := []string{ref.Scope(transport.PushScope)}
+	tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
+	if err != nil {
+		return err
+	}
+	// TODO(jasonhall): Against GCR, just doing the token handshake is
+	// enough, but this doesn't extend to Dockerhub
+	// (https://github.com/docker/hub-feedback/issues/1771), so we actually
+	// need to initiate an upload to tell whether the credentials can
+	// authorize a push. Figure out how to return early here when we can,
+	// to avoid a roundtrip for spec-compliant registries.
+	w := writer{
+		ref:    ref,
+		client: &http.Client{Transport: tr},
+	}
+	loc, _, err := w.initiateUpload("", "")
+	if loc != "" {
+		// Since we're only initiating the upload to check whether we
+		// can, we should attempt to cancel it, in case initiating
+		// reserves some resources on the server. We shouldn't wait for
+		// cancelling to complete, and we don't care if it fails.
+		go w.cancelUpload(loc)
+	}
+	return err
+}
+
+func (w *writer) cancelUpload(loc string) {
+	req, err := http.NewRequest(http.MethodDelete, loc, nil)
+	if err != nil {
+		return
+	}
+	_, _ = w.client.Do(req)
+}
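How the new CheckPushPermission entry point might be called before an expensive build, a minimal sketch (the reference is hypothetical, and DefaultKeychain is assumed to hold credentials for the target registry):

package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical destination; substitute a real repository.
	ref, err := name.ParseReference("gcr.io/my-project/my-image:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	// Fail fast if the credentials cannot authorize a push.
	if err := remote.CheckPushPermission(ref, authn.DefaultKeychain, http.DefaultTransport); err != nil {
		log.Fatalf("cannot push to %s: %v", ref, err)
	}
}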
@@ -21,27 +21,35 @@ import (
 	"io/ioutil"
 	"net/http"
 	"net/url"
+	"strings"
 	"sync"
 
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
 	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
 	"github.com/google/go-containerregistry/pkg/v1/types"
 	"github.com/google/go-containerregistry/pkg/v1/v1util"
 )
 
+var defaultPlatform = v1.Platform{
+	Architecture: "amd64",
+	OS:           "linux",
+}
+
 // remoteImage accesses an image from a remote registry
 type remoteImage struct {
-	ref    name.Reference
-	client *http.Client
+	fetcher
 	manifestLock sync.Mutex // Protects manifest
 	manifest     []byte
 	configLock   sync.Mutex // Protects config
 	config       []byte
+	mediaType    types.MediaType
+	platform     v1.Platform
 }
 
+// ImageOption is a functional option for Image.
 type ImageOption func(*imageOpener) error
 
 var _ partial.CompressedImageCore = (*remoteImage)(nil)
@@ -51,6 +59,7 @@ type imageOpener struct {
 	transport http.RoundTripper
 	ref       name.Reference
 	client    *http.Client
+	platform  v1.Platform
 }
 
 func (i *imageOpener) Open() (v1.Image, error) {
@@ -59,8 +68,11 @@ func (i *imageOpener) Open() (v1.Image, error) {
 		return nil, err
 	}
 	ri := &remoteImage{
-		ref:    i.ref,
-		client: &http.Client{Transport: tr},
+		fetcher: fetcher{
+			Ref:    i.ref,
+			Client: &http.Client{Transport: tr},
+		},
+		platform: i.platform,
 	}
 	imgCore, err := partial.CompressedToImage(ri)
 	if err != nil {
@@ -81,6 +93,7 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) {
 		auth:      authn.Anonymous,
 		transport: http.DefaultTransport,
 		ref:       ref,
+		platform:  defaultPlatform,
 	}
 
 	for _, option := range options {
@@ -91,58 +104,57 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) {
 	return img.Open()
 }
 
-func (r *remoteImage) url(resource, identifier string) url.URL {
+// fetcher implements methods for reading from a remote image.
+type fetcher struct {
+	Ref    name.Reference
+	Client *http.Client
+}
+
+// url returns a url.Url for the specified path in the context of this remote image reference.
+func (f *fetcher) url(resource, identifier string) url.URL {
 	return url.URL{
-		Scheme: r.ref.Context().Registry.Scheme(),
-		Host:   r.ref.Context().RegistryStr(),
-		Path:   fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier),
+		Scheme: f.Ref.Context().Registry.Scheme(),
+		Host:   f.Ref.Context().RegistryStr(),
+		Path:   fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
 	}
 }
 
-func (r *remoteImage) MediaType() (types.MediaType, error) {
-	// TODO(jonjohnsonjr): Determine this based on response.
-	return types.DockerManifestSchema2, nil
-}
-
-// TODO(jonjohnsonjr): Handle manifest lists.
-func (r *remoteImage) RawManifest() ([]byte, error) {
-	r.manifestLock.Lock()
-	defer r.manifestLock.Unlock()
-	if r.manifest != nil {
-		return r.manifest, nil
-	}
-
-	u := r.url("manifests", r.ref.Identifier())
+func (f *fetcher) fetchManifest(acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
+	u := f.url("manifests", f.Ref.Identifier())
 	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	// TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index.
-	req.Header.Set("Accept", string(types.DockerManifestSchema2))
-	resp, err := r.client.Do(req)
+	accept := []string{}
+	for _, mt := range acceptable {
+		accept = append(accept, string(mt))
+	}
+	req.Header.Set("Accept", strings.Join(accept, ","))
+
+	resp, err := f.Client.Do(req)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	defer resp.Body.Close()
 
-	if err := CheckError(resp, http.StatusOK); err != nil {
-		return nil, err
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
+		return nil, nil, err
 	}
 
 	manifest, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
-	digest, _, err := v1.SHA256(bytes.NewReader(manifest))
+	digest, size, err := v1.SHA256(bytes.NewReader(manifest))
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	// Validate the digest matches what we asked for, if pulling by digest.
-	if dgst, ok := r.ref.(name.Digest); ok {
+	if dgst, ok := f.Ref.(name.Digest); ok {
 		if digest.String() != dgst.DigestStr() {
-			return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref)
+			return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
 		}
 	} else {
 		// Do nothing for tags; I give up.
|
||||||
// https://github.com/GoogleContainerTools/kaniko/issues/298
|
// https://github.com/GoogleContainerTools/kaniko/issues/298
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Return all this info since we have to calculate it anyway.
|
||||||
|
desc := v1.Descriptor{
|
||||||
|
Digest: digest,
|
||||||
|
Size: size,
|
||||||
|
MediaType: types.MediaType(resp.Header.Get("Content-Type")),
|
||||||
|
}
|
||||||
|
|
||||||
|
return manifest, &desc, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *remoteImage) MediaType() (types.MediaType, error) {
|
||||||
|
if string(r.mediaType) != "" {
|
||||||
|
return r.mediaType, nil
|
||||||
|
}
|
||||||
|
return types.DockerManifestSchema2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(jonjohnsonjr): Handle manifest lists.
|
||||||
|
func (r *remoteImage) RawManifest() ([]byte, error) {
|
||||||
|
r.manifestLock.Lock()
|
||||||
|
defer r.manifestLock.Unlock()
|
||||||
|
if r.manifest != nil {
|
||||||
|
return r.manifest, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
acceptable := []types.MediaType{
|
||||||
|
types.DockerManifestSchema2,
|
||||||
|
types.OCIManifestSchema1,
|
||||||
|
// We'll resolve these to an image based on the platform.
|
||||||
|
types.DockerManifestList,
|
||||||
|
types.OCIImageIndex,
|
||||||
|
}
|
||||||
|
manifest, desc, err := r.fetchManifest(acceptable)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We want an image but the registry has an index, resolve it to an image.
|
||||||
|
for desc.MediaType == types.DockerManifestList || desc.MediaType == types.OCIImageIndex {
|
||||||
|
manifest, desc, err = r.matchImage(manifest)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.mediaType = desc.MediaType
|
||||||
r.manifest = manifest
|
r.manifest = manifest
|
||||||
return r.manifest, nil
|
return r.manifest, nil
|
||||||
}
|
}
|
||||||
|
|
@@ -202,12 +260,12 @@ func (rl *remoteLayer) Digest() (v1.Hash, error) {
 // Compressed implements partial.CompressedLayer
 func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
 	u := rl.ri.url("blobs", rl.digest.String())
-	resp, err := rl.ri.client.Get(u.String())
+	resp, err := rl.ri.Client.Get(u.String())
 	if err != nil {
 		return nil, err
 	}
 
-	if err := CheckError(resp, http.StatusOK); err != nil {
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
 		resp.Body.Close()
 		return nil, err
 	}
@@ -244,3 +302,36 @@ func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error)
 		digest: h,
 	}, nil
 }
+
+// This naively matches the first manifest with matching Architecture and OS.
+//
+// We should probably use this instead:
+//	github.com/containerd/containerd/platforms
+//
+// But first we'd need to migrate to:
+//	github.com/opencontainers/image-spec/specs-go/v1
+func (r *remoteImage) matchImage(rawIndex []byte) ([]byte, *v1.Descriptor, error) {
+	index, err := v1.ParseIndexManifest(bytes.NewReader(rawIndex))
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, childDesc := range index.Manifests {
+		// If platform is missing from child descriptor, assume it's amd64/linux.
+		p := defaultPlatform
+		if childDesc.Platform != nil {
+			p = *childDesc.Platform
+		}
+		if r.platform.Architecture == p.Architecture && r.platform.OS == p.OS {
+			childRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), childDesc.Digest), name.StrictValidation)
+			if err != nil {
+				return nil, nil, err
+			}
+			r.fetcher = fetcher{
+				Client: r.Client,
+				Ref:    childRef,
+			}
+			return r.fetchManifest([]types.MediaType{childDesc.MediaType})
+		}
+	}
+	return nil, nil, fmt.Errorf("no matching image for %s/%s, index: %s", r.platform.Architecture, r.platform.OS, string(rawIndex))
+}
vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go (generated, vendored, new file, 139 lines)
@@ -0,0 +1,139 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// remoteIndex accesses an index from a remote registry
+type remoteIndex struct {
+	fetcher
+	manifestLock sync.Mutex // Protects manifest
+	manifest     []byte
+	mediaType    types.MediaType
+}
+
+// Index provides access to a remote index reference, applying functional options
+// to the underlying imageOpener before resolving the reference into a v1.ImageIndex.
+func Index(ref name.Reference, options ...ImageOption) (v1.ImageIndex, error) {
+	i := &imageOpener{
+		auth:      authn.Anonymous,
+		transport: http.DefaultTransport,
+		ref:       ref,
+	}
+
+	for _, option := range options {
+		if err := option(i); err != nil {
+			return nil, err
+		}
+	}
+	tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)})
+	if err != nil {
+		return nil, err
+	}
+	return &remoteIndex{
+		fetcher: fetcher{
+			Ref:    i.ref,
+			Client: &http.Client{Transport: tr},
+		},
+	}, nil
+}
+
+func (r *remoteIndex) MediaType() (types.MediaType, error) {
+	if string(r.mediaType) != "" {
+		return r.mediaType, nil
+	}
+	return types.DockerManifestList, nil
+}
+
+func (r *remoteIndex) Digest() (v1.Hash, error) {
+	return partial.Digest(r)
+}
+
+func (r *remoteIndex) RawManifest() ([]byte, error) {
+	r.manifestLock.Lock()
+	defer r.manifestLock.Unlock()
+	if r.manifest != nil {
+		return r.manifest, nil
+	}
+
+	acceptable := []types.MediaType{
+		types.DockerManifestList,
+		types.OCIImageIndex,
+	}
+	manifest, desc, err := r.fetchManifest(acceptable)
+	if err != nil {
+		return nil, err
+	}
+
+	r.mediaType = desc.MediaType
+	r.manifest = manifest
+	return r.manifest, nil
+}
+
+func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) {
+	b, err := r.RawManifest()
+	if err != nil {
+		return nil, err
+	}
+	return v1.ParseIndexManifest(bytes.NewReader(b))
+}
+
+func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) {
+	imgRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation)
+	if err != nil {
+		return nil, err
+	}
+	ri := &remoteImage{
+		fetcher: fetcher{
+			Ref:    imgRef,
+			Client: r.Client,
+		},
+	}
+	imgCore, err := partial.CompressedToImage(ri)
+	if err != nil {
+		return imgCore, err
+	}
+	// Wrap the v1.Layers returned by this v1.Image in a hint for downstream
+	// remote.Write calls to facilitate cross-repo "mounting".
+	return &mountableImage{
+		Image:     imgCore,
+		Reference: r.Ref,
+	}, nil
+}
+
+func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) {
+	idxRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation)
+	if err != nil {
+		return nil, err
+	}
+	return &remoteIndex{
+		fetcher: fetcher{
+			Ref:    idxRef,
+			Client: r.Client,
+		},
+	}, nil
+}
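A usage sketch for the new remote.Index (not from this diff): fetch a manifest list anonymously and walk its child descriptors. The ubuntu tag is assumed to be published as a manifest list on Docker Hub.

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("index.docker.io/library/ubuntu:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	idx, err := remote.Index(ref) // anonymous auth by default
	if err != nil {
		log.Fatal(err)
	}
	m, err := idx.IndexManifest()
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range m.Manifests {
		fmt.Println(desc.Digest, desc.MediaType)
	}
}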
@@ -25,12 +25,12 @@ import (
 	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
 )
 
-type Tags struct {
+type tags struct {
 	Name string   `json:"name"`
 	Tags []string `json:"tags"`
 }
 
-// TODO(jonjohnsonjr): return []name.Tag?
+// List calls /tags/list for the given repository.
 func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) {
 	scopes := []string{repo.Scope(transport.PullScope)}
 	tr, err := transport.New(repo.Registry, auth, t, scopes)
@@ -51,14 +51,14 @@ func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) (
 	}
 	defer resp.Body.Close()
 
-	if err := CheckError(resp, http.StatusOK); err != nil {
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
 		return nil, err
 	}
 
-	tags := Tags{}
-	if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil {
+	parsed := tags{}
+	if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
 		return nil, err
 	}
 
-	return tags.Tags, nil
+	return parsed.Tags, nil
 }
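Since the exported Tags type just became private, enumerating tags now goes through List itself. A minimal sketch of a caller (not from this diff):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	repo, err := name.NewRepository("index.docker.io/library/ubuntu", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	tags, err := remote.List(repo, authn.Anonymous, http.DefaultTransport)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)
}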
@@ -16,7 +16,7 @@ package remote
 
 import (
 	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 )
 
 // MountableLayer wraps a v1.Layer in a shim that enables the layer to be
@@ -19,6 +19,7 @@ import (
 	"net/http"
 
 	"github.com/google/go-containerregistry/pkg/authn"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 )
 
 // WithTransport is a functional option for overriding the default transport
@@ -54,3 +55,10 @@ func WithAuthFromKeychain(keys authn.Keychain) ImageOption {
 		return nil
 	}
 }
+
+func WithPlatform(p v1.Platform) ImageOption {
+	return func(i *imageOpener) error {
+		i.platform = p
+		return nil
+	}
+}
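With the new option, a caller can steer the index-to-image resolution away from the amd64/linux default. A minimal sketch (not from this diff):

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("index.docker.io/library/ubuntu:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	// Resolve the manifest list to its arm64/linux child instead of the default.
	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{Architecture: "arm64", OS: "linux"}))
	if err != nil {
		log.Fatal(err)
	}
	d, err := img.Digest()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("resolved digest:", d)
}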
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go (generated, vendored; 11 lines changed)
@@ -15,9 +15,8 @@
 package transport
 
 import (
-	"fmt"
-
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/url"
@@ -40,6 +39,8 @@ type bearerTransport struct {
 	// See https://docs.docker.com/registry/spec/auth/token/
 	service string
 	scopes  []string
+	// Scheme we should use, determined by ping response.
+	scheme string
 }
 
 var _ http.RoundTripper = (*bearerTransport)(nil)
@@ -61,6 +62,8 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
 		in.Header.Set("Authorization", hdr)
 	}
 	in.Header.Set("User-Agent", transportName)
+
+	in.URL.Scheme = bt.scheme
 	return bt.inner.RoundTrip(in)
 }
@@ -103,6 +106,10 @@ func (bt *bearerTransport) refresh() error {
 	}
 	defer resp.Body.Close()
 
+	if err := CheckError(resp, http.StatusOK); err != nil {
+		return err
+	}
+
 	content, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
 		return err
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package remote
+package transport
 
 import (
 	"encoding/json"
@@ -35,7 +35,7 @@ var _ error = (*Error)(nil)
 func (e *Error) Error() string {
 	switch len(e.Errors) {
 	case 0:
-		return "<empty remote.Error response>"
+		return "<empty transport.Error response>"
 	case 1:
 		return e.Errors[0].String()
 	default:
@@ -55,9 +55,13 @@ type Diagnostic struct {
 	Detail interface{} `json:"detail,omitempty"`
 }
 
-// String stringifies the Diagnostic
+// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
 func (d Diagnostic) String() string {
-	return fmt.Sprintf("%s: %q", d.Code, d.Message)
+	msg := fmt.Sprintf("%s: %s", d.Code, d.Message)
+	if d.Detail != nil {
+		msg = fmt.Sprintf("%s; %v", msg, d.Detail)
+	}
+	return msg
 }
 
 // ErrorCode is an enumeration of supported error codes.
@@ -83,6 +87,7 @@ const (
 	UnsupportedErrorCode ErrorCode = "UNSUPPORTED"
 )
 
+// CheckError returns a structured error if the response status is not in codes.
 func CheckError(resp *http.Response, codes ...int) error {
 	for _, code := range codes {
 		if resp.StatusCode == code {
@@ -36,6 +36,9 @@ type pingResp struct {
 	// Following the challenge there are often key/value pairs
 	// e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
 	parameters map[string]string
+
+	// The registry's scheme to use. Communicates whether we fell back to http.
+	scheme string
 }
 
 func (c challenge) Canonical() challenge {
@@ -63,31 +66,50 @@ func parseChallenge(suffix string) map[string]string {
 func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
 	client := http.Client{Transport: t}
 
-	url := fmt.Sprintf("%s://%s/v2/", reg.Scheme(), reg.Name())
-	resp, err := client.Get(url)
-	if err != nil {
-		return nil, err
+	// This first attempts to use "https" for every request, falling back to http
+	// if the registry matches our localhost heuristic or if it is intentionally
+	// set to insecure via name.NewInsecureRegistry.
+	schemes := []string{"https"}
+	if reg.Scheme() == "http" {
+		schemes = append(schemes, "http")
 	}
-	defer resp.Body.Close()
 
-	switch resp.StatusCode {
-	case http.StatusOK:
-		// If we get a 200, then no authentication is needed.
-		return &pingResp{challenge: anonymous}, nil
-	case http.StatusUnauthorized:
-		wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate"))
-		if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
-			// If there are two parts, then parse the challenge parameters.
-			return &pingResp{
-				challenge:  challenge(parts[0]).Canonical(),
-				parameters: parseChallenge(parts[1]),
-			}, nil
+	var connErr error
+	for _, scheme := range schemes {
+		url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
+		resp, err := client.Get(url)
+		if err != nil {
+			connErr = err
+			// Potentially retry with http.
+			continue
 		}
-		// Otherwise, just return the challenge without parameters.
-		return &pingResp{
-			challenge: challenge(wac).Canonical(),
-		}, nil
-	default:
-		return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status)
+		defer resp.Body.Close()
+
+		switch resp.StatusCode {
+		case http.StatusOK:
+			// If we get a 200, then no authentication is needed.
+			return &pingResp{
+				challenge: anonymous,
+				scheme:    scheme,
+			}, nil
+		case http.StatusUnauthorized:
+			wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate"))
+			if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
+				// If there are two parts, then parse the challenge parameters.
+				return &pingResp{
+					challenge:  challenge(parts[0]).Canonical(),
+					parameters: parseChallenge(parts[1]),
+					scheme:     scheme,
+				}, nil
+			}
+			// Otherwise, just return the challenge without parameters.
+			return &pingResp{
+				challenge: challenge(wac).Canonical(),
+				scheme:    scheme,
+			}, nil
+		default:
+			return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status)
+		}
 	}
+	return nil, connErr
 }
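The http fallback above only triggers when the registry's Scheme() is already "http". A sketch of opting in, assuming NewInsecureRegistry mirrors NewRegistry's Strictness argument in pkg/name:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// An insecure registry reports Scheme() == "http", so ping() will
	// retry over plain http after the initial https attempt fails.
	reg, err := name.NewInsecureRegistry("localhost:5000", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(reg.Name(), "scheme:", reg.Scheme())
}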
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go (generated, vendored; 1 line changed)
@@ -73,6 +73,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope
 		registry: reg,
 		service:  service,
 		scopes:   scopes,
+		scheme:   pr.scheme,
 	}
 	if err := bt.refresh(); err != nil {
 		return nil, err
@@ -18,16 +18,27 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"io"
 	"log"
 	"net/http"
 	"net/url"
 
 	"github.com/google/go-containerregistry/pkg/authn"
 	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
 	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"golang.org/x/sync/errgroup"
 )
 
+type manifest interface {
+	RawManifest() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	Digest() (v1.Hash, error)
+}
+
 // Write pushes the provided img to the specified image reference.
 func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error {
 	ls, err := img.Layers()
@@ -41,48 +52,74 @@ func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.Ro
 		return err
 	}
 	w := writer{
 		ref:    ref,
 		client: &http.Client{Transport: tr},
-		img:    img,
 	}
 
-	bs, err := img.BlobSet()
-	if err != nil {
-		return err
-	}
-
-	// Spin up go routines to publish each of the members of BlobSet(),
-	// and use an error channel to collect their results.
-	errCh := make(chan error)
-	defer close(errCh)
-	for h := range bs {
-		go func(h v1.Hash) {
-			errCh <- w.uploadOne(h)
-		}(h)
-	}
-
-	// Now wait for all of the blob uploads to complete.
-	var errors []error
-	for _ = range bs {
-		if err := <-errCh; err != nil {
-			errors = append(errors, err)
+	// Upload individual layers in goroutines and collect any errors.
+	// If we can dedupe by the layer digest, try to do so. If the layer is
+	// a stream.Layer, we can't dedupe and might re-upload.
+	var g errgroup.Group
+	uploaded := map[v1.Hash]bool{}
+	for _, l := range ls {
+		l := l
+		if _, ok := l.(*stream.Layer); !ok {
+			h, err := l.Digest()
+			if err != nil {
+				return err
+			}
+			// If we can determine the layer's digest ahead of
+			// time, use it to dedupe uploads.
+			if uploaded[h] {
+				continue // Already uploading.
+			}
+			uploaded[h] = true
 		}
+
+		g.Go(func() error {
+			return w.uploadOne(l)
+		})
 	}
-	if len(errors) > 0 {
-		// Return the first error we encountered.
-		return errors[0]
+
+	if l, err := partial.ConfigLayer(img); err == stream.ErrNotComputed {
+		// We can't read the ConfigLayer, because of streaming layers, since the
+		// config hasn't been calculated yet.
+		if err := g.Wait(); err != nil {
+			return err
+		}
+
+		// Now that all the layers are uploaded, upload the config file blob.
+		l, err := partial.ConfigLayer(img)
+		if err != nil {
+			return err
+		}
+		if err := w.uploadOne(l); err != nil {
+			return err
+		}
+	} else if err != nil {
+		// This is an actual error, not a streaming error, just return it.
+		return err
+	} else {
+		// We *can* read the ConfigLayer, so upload it concurrently with the layers.
+		g.Go(func() error {
+			return w.uploadOne(l)
+		})
+
+		// Wait for the layers + config.
+		if err := g.Wait(); err != nil {
+			return err
+		}
 	}
 
 	// With all of the constituent elements uploaded, upload the manifest
 	// to commit the image.
-	return w.commitImage()
+	return w.commitImage(img)
 }
 
 // writer writes the elements of an image to a remote image reference.
 type writer struct {
 	ref    name.Reference
 	client *http.Client
-	img    v1.Image
 }
 
 // url returns a url.Url for the specified path in the context of this remote image reference.
@@ -110,11 +147,11 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) {
 	return resp.Request.URL.ResolveReference(u).String(), nil
 }
 
-// checkExisting checks if a blob exists already in the repository by making a
+// checkExistingBlob checks if a blob exists already in the repository by making a
 // HEAD request to the blob store API. GCR performs an existence check on the
 // initiation if "mount" is specified, even if no "from" sources are specified.
 // However, this is not broadly applicable to all registries, e.g. ECR.
-func (w *writer) checkExisting(h v1.Hash) (bool, error) {
+func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
 	u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String()))
 
 	resp, err := w.client.Head(u.String())
@@ -123,7 +160,31 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) {
 	}
 	defer resp.Body.Close()
 
-	if err := CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
+		return false, err
+	}
+
+	return resp.StatusCode == http.StatusOK, nil
+}
+
+// checkExistingManifest checks if a manifest exists already in the repository
+// by making a HEAD request to the manifest API.
+func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) {
+	u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), h.String()))
+
+	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+	if err != nil {
+		return false, err
+	}
+	req.Header.Set("Accept", string(mt))
+
+	resp, err := w.client.Do(req)
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
 		return false, err
 	}
@@ -136,20 +197,13 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) {
 // On success, the layer was either mounted (nothing more to do) or a blob
 // upload was initiated and the body of that blob should be sent to the returned
 // location.
-func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err error) {
+func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) {
 	u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr()))
-	uv := url.Values{
-		"mount": []string{h.String()},
-	}
-	l, err := w.img.LayerByDigest(h)
-	if err != nil {
-		return "", false, err
-	}
-
-	if ml, ok := l.(*MountableLayer); ok {
-		if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() {
-			uv["from"] = []string{ml.Reference.Context().RepositoryStr()}
-		}
+	uv := url.Values{}
+	if mount != "" && from != "" {
+		// Quay will fail if we specify a "mount" without a "from".
+		uv["mount"] = []string{mount}
+		uv["from"] = []string{from}
 	}
 	u.RawQuery = uv.Encode()
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
defer resp.Body.Close()
|
||||||
|
|
||||||
if err := CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
|
if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil {
|
||||||
return "", false, err
|
return "", false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -181,15 +235,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e
|
||||||
// streamBlob streams the contents of the blob to the specified location.
|
// streamBlob streams the contents of the blob to the specified location.
|
||||||
// On failure, this will return an error. On success, this will return the location
|
// On failure, this will return an error. On success, this will return the location
|
||||||
// header indicating how to commit the streamed blob.
|
// header indicating how to commit the streamed blob.
|
||||||
func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation string, err error) {
|
func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) {
|
||||||
l, err := w.img.LayerByDigest(h)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
blob, err := l.Compressed()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer blob.Close()
|
defer blob.Close()
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
|
req, err := http.NewRequest(http.MethodPatch, streamLocation, blob)
|
||||||
|
|
@@ -203,7 +249,7 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st
 	}
 	defer resp.Body.Close()
 
-	if err := CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
+	if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil {
 		return "", err
 	}
@@ -212,14 +258,15 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st
 	return w.nextLocation(resp)
 }
 
-// commitBlob commits this blob by sending a PUT to the location returned from streaming the blob.
-func (w *writer) commitBlob(h v1.Hash, location string) (err error) {
+// commitBlob commits this blob by sending a PUT to the location returned from
+// streaming the blob.
+func (w *writer) commitBlob(location, digest string) error {
 	u, err := url.Parse(location)
 	if err != nil {
 		return err
 	}
 	v := u.Query()
-	v.Set("digest", h.String())
+	v.Set("digest", digest)
 	u.RawQuery = v.Encode()
 
 	req, err := http.NewRequest(http.MethodPut, u.String(), nil)
@@ -233,47 +280,82 @@ func (w *writer) commitBlob(h v1.Hash, location string) (err error) {
 	}
 	defer resp.Body.Close()
 
-	return CheckError(resp, http.StatusCreated)
+	return transport.CheckError(resp, http.StatusCreated)
 }
 
 // uploadOne performs a complete upload of a single layer.
-func (w *writer) uploadOne(h v1.Hash) error {
-	existing, err := w.checkExisting(h)
-	if err != nil {
-		return err
+func (w *writer) uploadOne(l v1.Layer) error {
+	var from, mount, digest string
+	if _, ok := l.(*stream.Layer); !ok {
+		// Layer isn't streamable, we should take advantage of that to
+		// skip uploading if possible.
+		// By sending ?digest= in the request, we'll also check that
+		// our computed digest matches the one computed by the
+		// registry.
+		h, err := l.Digest()
+		if err != nil {
+			return err
+		}
+		digest = h.String()
+
+		existing, err := w.checkExistingBlob(h)
+		if err != nil {
+			return err
+		}
+		if existing {
+			log.Printf("existing blob: %v", h)
+			return nil
+		}
+
+		mount = h.String()
 	}
-	if existing {
-		log.Printf("existing blob: %v", h)
-		return nil
+	if ml, ok := l.(*MountableLayer); ok {
+		if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() {
+			from = ml.Reference.Context().RepositoryStr()
+		}
 	}
 
-	location, mounted, err := w.initiateUpload(h)
+	location, mounted, err := w.initiateUpload(from, mount)
 	if err != nil {
 		return err
 	} else if mounted {
-		log.Printf("mounted blob: %v", h)
+		h, err := l.Digest()
+		if err != nil {
+			return err
+		}
+		log.Printf("mounted blob: %s", h.String())
 		return nil
 	}
 
-	location, err = w.streamBlob(h, location)
+	blob, err := l.Compressed()
+	if err != nil {
+		return err
+	}
+	location, err = w.streamBlob(blob, location)
 	if err != nil {
 		return err
 	}
 
-	if err := w.commitBlob(h, location); err != nil {
+	h, err := l.Digest()
+	if err != nil {
 		return err
 	}
-	log.Printf("pushed blob %v", h)
+	digest = h.String()
+
+	if err := w.commitBlob(location, digest); err != nil {
+		return err
+	}
+	log.Printf("pushed blob: %s", digest)
 	return nil
 }
 
 // commitImage does a PUT of the image's manifest.
-func (w *writer) commitImage() error {
-	raw, err := w.img.RawManifest()
+func (w *writer) commitImage(man manifest) error {
+	raw, err := man.RawManifest()
 	if err != nil {
 		return err
 	}
-	mt, err := w.img.MediaType()
+	mt, err := man.MediaType()
 	if err != nil {
 		return err
 	}
@@ -293,11 +375,11 @@ func (w *writer) commitImage() error {
 	}
 	defer resp.Body.Close()
 
-	if err := CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
+	if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil {
 		return err
 	}
 
-	digest, err := w.img.Digest()
+	digest, err := man.Digest()
 	if err != nil {
 		return err
 	}
@@ -324,11 +406,68 @@ func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string {
 	// Push scope should be the first element because a few registries just look at the first scope to determine access.
 	scopes = append(scopes, ref.Scope(transport.PushScope))
 
-	for scope, _ := range scopeSet {
+	for scope := range scopeSet {
 		scopes = append(scopes, scope)
 	}
 
 	return scopes
 }
 
-// TODO(mattmoor): WriteIndex
+// WriteIndex pushes the provided ImageIndex to the specified image reference.
+// WriteIndex will attempt to push all of the referenced manifests before
+// attempting to push the ImageIndex, to retain referential integrity.
+func WriteIndex(ref name.Reference, ii v1.ImageIndex, auth authn.Authenticator, t http.RoundTripper) error {
+	index, err := ii.IndexManifest()
+	if err != nil {
+		return err
+	}
+
+	scopes := []string{ref.Scope(transport.PushScope)}
+	tr, err := transport.New(ref.Context().Registry, auth, t, scopes)
+	if err != nil {
+		return err
+	}
+	w := writer{
+		ref:    ref,
+		client: &http.Client{Transport: tr},
+	}
+
+	for _, desc := range index.Manifests {
+		ref, err := name.ParseReference(fmt.Sprintf("%s@%s", ref.Context(), desc.Digest), name.StrictValidation)
+		if err != nil {
+			return err
+		}
+		exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType)
+		if err != nil {
+			return err
+		}
+		if exists {
+			log.Printf("existing manifest: %v", desc.Digest)
+			continue
+		}
+
+		switch desc.MediaType {
+		case types.OCIImageIndex, types.DockerManifestList:
+			ii, err := ii.ImageIndex(desc.Digest)
+			if err != nil {
+				return err
+			}
+
+			if err := WriteIndex(ref, ii, auth, t); err != nil {
+				return err
+			}
+		case types.OCIManifestSchema1, types.DockerManifestSchema2:
+			img, err := ii.Image(desc.Digest)
+			if err != nil {
+				return err
+			}
+			if err := Write(ref, img, auth, t); err != nil {
+				return err
+			}
+		}
+	}
+
+	// With all of the constituent elements uploaded, upload the manifest
+	// to commit the image.
+	return w.commitImage(ii)
+}
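End-to-end, the new pieces compose: random.Index fabricates an index in memory and WriteIndex pushes its children first, then the index itself. A minimal sketch against a hypothetical local registry (not from this diff):

package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical target; a registry must be listening on :5000.
	ref, err := name.ParseReference("localhost:5000/test/idx:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	idx, err := random.Index(1024, 2, 3)
	if err != nil {
		log.Fatal(err)
	}
	// Children are pushed before the index to retain referential integrity.
	if err := remote.WriteIndex(ref, idx, authn.Anonymous, http.DefaultTransport); err != nil {
		log.Fatal(err)
	}
}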
vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go (generated, vendored, new file, 194 lines)
@@ -0,0 +1,194 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stream
+
+import (
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+)
+
+var (
+	// ErrNotComputed is returned when the requested value is not yet
+	// computed because the stream has not been consumed yet.
+	ErrNotComputed = errors.New("value not computed until stream is consumed")
+
+	// ErrConsumed is returned by Compressed when the underlying stream has
+	// already been consumed and closed.
+	ErrConsumed = errors.New("stream was already consumed")
+)
+
+// Layer is a streaming implementation of v1.Layer.
+type Layer struct {
+	blob     io.ReadCloser
+	consumed bool
+
+	mu             sync.Mutex
+	digest, diffID *v1.Hash
+	size           int64
+}
+
+var _ v1.Layer = (*Layer)(nil)
+
+// NewLayer creates a Layer from an io.ReadCloser.
+func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} }
+
+// Digest implements v1.Layer.
+func (l *Layer) Digest() (v1.Hash, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.digest == nil {
+		return v1.Hash{}, ErrNotComputed
+	}
+	return *l.digest, nil
+}
+
+// DiffID implements v1.Layer.
+func (l *Layer) DiffID() (v1.Hash, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.diffID == nil {
+		return v1.Hash{}, ErrNotComputed
+	}
+	return *l.diffID, nil
+}
+
+// Size implements v1.Layer.
+func (l *Layer) Size() (int64, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.size == 0 {
+		return 0, ErrNotComputed
+	}
+	return l.size, nil
+}
+
+// Uncompressed implements v1.Layer.
+func (l *Layer) Uncompressed() (io.ReadCloser, error) {
+	return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented")
+}
+
+// Compressed implements v1.Layer.
+func (l *Layer) Compressed() (io.ReadCloser, error) {
+	if l.consumed {
+		return nil, ErrConsumed
+	}
+	return newCompressedReader(l)
+}
+
+type compressedReader struct {
+	closer io.Closer // original blob's Closer.
+
+	h, zh hash.Hash // collects digests of compressed and uncompressed stream.
+	pr    io.Reader
+	count *countWriter
+
+	l *Layer // stream.Layer to update upon Close.
+}
+
+func newCompressedReader(l *Layer) (*compressedReader, error) {
+	h := sha256.New()
+	zh := sha256.New()
+	count := &countWriter{}
+
+	// gzip.Writer writes to the output stream via pipe, a hasher to
+	// capture compressed digest, and a countWriter to capture compressed
+	// size.
+	pr, pw := io.Pipe()
+	zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed)
+	if err != nil {
+		return nil, err
+	}
+
+	cr := &compressedReader{
+		closer: newMultiCloser(zw, l.blob),
+		pr:     pr,
+		h:      h,
+		zh:     zh,
+		count:  count,
+		l:      l,
+	}
+	go func() {
+		if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		// Now close the compressed reader, to flush the gzip stream
+		// and calculate digest/diffID/size. This will cause pr to
+		// return EOF which will cause readers of the Compressed stream
+		// to finish reading.
+		pw.CloseWithError(cr.Close())
+	}()
+
+	return cr, nil
+}
+
+func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
+
+func (cr *compressedReader) Close() error {
+	cr.l.mu.Lock()
+	defer cr.l.mu.Unlock()
+
+	// Close the inner ReadCloser.
+	if err := cr.closer.Close(); err != nil {
+		return err
+	}
+
+	diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil)))
+	if err != nil {
+		return err
+	}
+	cr.l.diffID = &diffID
+
+	digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil)))
+	if err != nil {
+		return err
|
||||||
|
}
|
||||||
|
cr.l.digest = &digest
|
||||||
|
|
||||||
|
cr.l.size = cr.count.n
|
||||||
|
cr.l.consumed = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// countWriter counts bytes written to it.
|
||||||
|
type countWriter struct{ n int64 }
|
||||||
|
|
||||||
|
func (c *countWriter) Write(p []byte) (int, error) {
|
||||||
|
c.n += int64(len(p))
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// multiCloser is a Closer that collects multiple Closers and Closes them in order.
|
||||||
|
type multiCloser []io.Closer
|
||||||
|
|
||||||
|
var _ io.Closer = (multiCloser)(nil)
|
||||||
|
|
||||||
|
func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) }
|
||||||
|
|
||||||
|
func (m multiCloser) Close() error {
|
||||||
|
for _, c := range m {
|
||||||
|
if err := c.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
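A minimal sketch of how a streaming layer like the one above might be consumed (hypothetical usage, not part of this change; it assumes only the vendored stream package and the standard library):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"strings"

	"github.com/google/go-containerregistry/pkg/v1/stream"
)

func main() {
	// Wrap any io.ReadCloser; digest, diffID and size are unknown until
	// the compressed stream has been fully consumed.
	l := stream.NewLayer(ioutil.NopCloser(strings.NewReader("layer contents")))

	if _, err := l.Digest(); err != nil {
		fmt.Println(err) // ErrNotComputed: stream not consumed yet
	}

	rc, err := l.Compressed()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(ioutil.Discard, rc); err != nil {
		log.Fatal(err)
	}

	d, err := l.Digest() // available once the stream hit EOF
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d)
}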
@@ -26,7 +26,7 @@ import (
	"sync"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/types"
	"github.com/google/go-containerregistry/pkg/v1/v1util"
@@ -54,6 +54,7 @@ type compressedImage struct {
var _ partial.UncompressedImageCore = (*uncompressedImage)(nil)
var _ partial.CompressedImageCore = (*compressedImage)(nil)

// Opener is a thunk for opening a tar file.
type Opener func() (io.ReadCloser, error)

func pathOpener(path string) Opener {
@@ -62,6 +63,7 @@ func pathOpener(path string) Opener {
	}
}

// ImageFromPath returns a v1.Image from a tarball located on path.
func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) {
	return Image(pathOpener(path), tag)
}
@@ -20,7 +20,7 @@ import (
	"io/ioutil"
	"os"

	"github.com/google/go-containerregistry/pkg/v1"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/v1util"
)
@@ -23,36 +23,46 @@ import (
	"os"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1"
	v1 "github.com/google/go-containerregistry/pkg/v1"
)

// WriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.Write with a new file.
func WriteToFile(p string, tag name.Tag, img v1.Image) error {
func WriteToFile(p string, ref name.Reference, img v1.Image) error {
	w, err := os.Create(p)
	if err != nil {
		return err
	}
	defer w.Close()

	return Write(tag, img, w)
	return Write(ref, img, w)
}

// MultiWriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.MultiWrite with a new file.
func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image) error {
	var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage))
	for i, d := range tagToImage {
		refToImage[i] = d
	}
	return MultiRefWriteToFile(p, refToImage)
}

// MultiRefWriteToFile writes in the compressed format to a tarball, on disk.
// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file.
func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image) error {
	w, err := os.Create(p)
	if err != nil {
		return err
	}
	defer w.Close()

	return MultiWrite(tagToImage, w)
	return MultiRefWrite(refToImage, w)
}

// Write is a wrapper to write a single image and tag to a tarball.
func Write(tag name.Tag, img v1.Image, w io.Writer) error {
func Write(ref name.Reference, img v1.Image, w io.Writer) error {
	return MultiWrite(map[name.Tag]v1.Image{tag: img}, w)
	return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w)
}

// MultiWrite writes the contents of each image to the provided reader, in the compressed format.
@@ -61,10 +71,23 @@ func Write(tag name.Tag, img v1.Image, w io.Writer) error {
// One file for each layer, named after the layer's SHA.
// One file for the config blob, named after its SHA.
func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error {
	var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage))
	for i, d := range tagToImage {
		refToImage[i] = d
	}
	return MultiRefWrite(refToImage, w)
}

// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format.
// The contents are written in the following format:
// One manifest.json file at the top level containing information about several images.
// One file for each layer, named after the layer's SHA.
// One file for the config blob, named after its SHA.
func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error {
	tf := tar.NewWriter(w)
	defer tf.Close()

	imageToTags := dedupTagToImage(tagToImage)
	imageToTags := dedupRefToImage(refToImage)
	var td tarDescriptor

	for img, tags := range imageToTags {
@@ -135,14 +158,20 @@ func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error {
	return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes)))
}

func dedupTagToImage(tagToImage map[name.Tag]v1.Image) map[v1.Image][]string {
func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string {
	imageToTags := make(map[v1.Image][]string)

	for tag, img := range tagToImage {
	for ref, img := range refToImage {
		if tags, ok := imageToTags[img]; ok {
		if tag, ok := ref.(name.Tag); ok {
			imageToTags[img] = append(tags, tag.String())
			if tags, ok := imageToTags[img]; ok && tags != nil {
				imageToTags[img] = append(tags, tag.String())
			} else {
				imageToTags[img] = []string{tag.String()}
			}
		} else {
			imageToTags[img] = []string{tag.String()}
			if _, ok := imageToTags[img]; !ok {
				imageToTags[img] = nil
			}
		}
	}
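For context, a minimal sketch of calling the reworked tarball API after this change (hypothetical usage; it assumes the vendored tarball and name packages above, plus random.Image for a throwaway image):

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	img, err := random.Image(1024, 1) // one layer of 1KiB random data
	if err != nil {
		log.Fatal(err)
	}

	// Write now accepts any name.Reference, not only a name.Tag.
	ref, err := name.ParseReference("example.com/repo:latest", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	if err := tarball.WriteToFile("image.tar", ref, img); err != nil {
		log.Fatal(err)
	}

	// The old tag-keyed entry point still works via the shim above.
	tag, err := name.NewTag("example.com/repo:old", name.WeakValidation)
	if err != nil {
		log.Fatal(err)
	}
	if err := tarball.MultiWriteToFile("multi.tar", map[name.Tag]v1.Image{tag: img}); err != nil {
		log.Fatal(err)
	}
}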
@@ -1,40 +0,0 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1util

import (
	"io"
)

func nop() error {
	return nil
}

// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing.
func NopWriteCloser(w io.Writer) io.WriteCloser {
	return &writeAndCloser{
		Writer:    w,
		CloseFunc: nop,
	}
}

// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing.
// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity.
func NopReadCloser(r io.Reader) io.ReadCloser {
	return &readAndCloser{
		Reader:    r,
		CloseFunc: nop,
	}
}
@@ -20,7 +20,7 @@ import (
	"hash"
	"io"

	"github.com/google/go-containerregistry/pkg/v1"
	v1 "github.com/google/go-containerregistry/pkg/v1"
)

type verifyReader struct {
@@ -70,56 +70,14 @@ func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	}, nil
}

// GzipWriteCloser returns an io.WriteCloser to which uncompressed data may be
// written, and the compressed data is then written to the provided
// io.WriteCloser.
func GzipWriteCloser(w io.WriteCloser) io.WriteCloser {
	gw := gzip.NewWriter(w)
	return &writeAndCloser{
		Writer: gw,
		CloseFunc: func() error {
			if err := gw.Close(); err != nil {
				return err
			}
			return w.Close()
		},
	}
}

// gunzipWriteCloser implements io.WriteCloser
// It is used to implement GunzipWriteClose.
type gunzipWriteCloser struct {
	*bytes.Buffer
	writer io.WriteCloser
}

// Close implements io.WriteCloser
func (gwc *gunzipWriteCloser) Close() error {
	// TODO(mattmoor): How to avoid buffering this whole thing into memory?
	gr, err := gzip.NewReader(gwc.Buffer)
	if err != nil {
		return err
	}
	if _, err := io.Copy(gwc.writer, gr); err != nil {
		return err
	}
	return gwc.writer.Close()
}

// GunzipWriteCloser returns an io.WriteCloser to which compressed data may be
// written, and the uncompressed data is then written to the provided
// io.WriteCloser.
func GunzipWriteCloser(w io.WriteCloser) (io.WriteCloser, error) {
	return &gunzipWriteCloser{
		Buffer: bytes.NewBuffer(nil),
		writer: w,
	}, nil
}

// IsGzipped detects whether the input stream is compressed.
func IsGzipped(r io.Reader) (bool, error) {
	magicHeader := make([]byte, 2)
	if _, err := r.Read(magicHeader); err != nil {
	n, err := r.Read(magicHeader)
	if n == 0 && err == io.EOF {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return bytes.Equal(magicHeader, gzipMagicHeader), nil
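A quick illustration of the new empty-stream behavior (a hypothetical test snippet, not part of this change; it assumes the v1util package as patched above):

package v1util_test

import (
	"bytes"
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/v1util"
)

func ExampleIsGzipped_empty() {
	// Before this change an empty reader surfaced io.EOF as an error;
	// now it simply reports "not gzipped".
	ok, err := v1util.IsGzipped(bytes.NewReader(nil))
	fmt.Println(ok, err)
	// Output: false <nil>
}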
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2018 otiai10

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@@ -0,0 +1,93 @@
package copy

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// Copy copies src to dest, doesn't matter if src is a directory or a file
func Copy(src, dest string) error {
	info, err := os.Lstat(src)
	if err != nil {
		return err
	}
	return copy(src, dest, info)
}

// copy dispatches copy-funcs according to the mode.
// Because this "copy" could be called recursively,
// "info" MUST be given here, NOT nil.
func copy(src, dest string, info os.FileInfo) error {
	if info.Mode()&os.ModeSymlink != 0 {
		return lcopy(src, dest, info)
	}
	if info.IsDir() {
		return dcopy(src, dest, info)
	}
	return fcopy(src, dest, info)
}

// fcopy is for just a file,
// with considering existence of parent directory
// and file permission.
func fcopy(src, dest string, info os.FileInfo) error {

	if err := os.MkdirAll(filepath.Dir(dest), os.ModePerm); err != nil {
		return err
	}

	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()

	if err = os.Chmod(f.Name(), info.Mode()); err != nil {
		return err
	}

	s, err := os.Open(src)
	if err != nil {
		return err
	}
	defer s.Close()

	_, err = io.Copy(f, s)
	return err
}

// dcopy is for a directory,
// with scanning contents inside the directory
// and pass everything to "copy" recursively.
func dcopy(srcdir, destdir string, info os.FileInfo) error {

	if err := os.MkdirAll(destdir, info.Mode()); err != nil {
		return err
	}

	contents, err := ioutil.ReadDir(srcdir)
	if err != nil {
		return err
	}

	for _, content := range contents {
		cs, cd := filepath.Join(srcdir, content.Name()), filepath.Join(destdir, content.Name())
		if err := copy(cs, cd, content); err != nil {
			// If any error, exit immediately
			return err
		}
	}
	return nil
}

// lcopy is for a symlink,
// with just creating a new symlink by replicating src symlink.
func lcopy(src, dest string, info os.FileInfo) error {
	src, err := os.Readlink(src)
	if err != nil {
		return err
	}
	return os.Symlink(src, dest)
}
@@ -0,0 +1 @@
./testdata/case01
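A minimal sketch of how this newly vendored helper is invoked (hypothetical paths, not part of the diff; it assumes only the github.com/otiai10/copy package added above):

package main

import (
	"log"

	"github.com/otiai10/copy"
)

func main() {
	// Recursively copies a directory tree, preserving file modes and
	// replicating symlinks; a plain-file source is copied as a file.
	if err := copy.Copy("./src-dir", "./dest-dir"); err != nil {
		log.Fatal(err)
	}
}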
@@ -20,6 +20,7 @@ import (
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"net"
@@ -178,21 +179,10 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
		return &roundTripper{a, rt}
	}

	getCert := c.TLS.GetCert
	if c.TLS.GetCert != nil {
	c.TLS.GetCert = func() (*tls.Certificate, error) {
		return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
		// If previous GetCert is present and returns a valid non-nil
		// certificate, use that. Otherwise use cert from exec plugin.
		if getCert != nil {
			cert, err := getCert()
			if err != nil {
				return nil, err
			}
			if cert != nil {
				return cert, nil
			}
		}
		return a.cert()
	}
	c.TLS.GetCert = a.cert

	var dial func(ctx context.Context, network, addr string) (net.Conn, error)
	if c.Dial != nil {
@@ -129,7 +129,7 @@ func SetAuthProxyHeaders(req *http.Request, username string, groups []string, ex
	}
	for key, values := range extra {
		for _, value := range values {
			req.Header.Add("X-Remote-Extra-"+key, value)
			req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value)
		}
	}
}
@@ -246,7 +246,7 @@ func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Respons
	}
	for k, vv := range rt.impersonate.Extra {
		for _, v := range vv {
			req.Header.Add(ImpersonateUserExtraHeaderPrefix+k, v)
			req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v)
		}
	}
@@ -422,3 +422,110 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper {
	return rt.delegatedRoundTripper
}

func legalHeaderByte(b byte) bool {
	return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b]
}

func shouldEscape(b byte) bool {
	// url.PathUnescape() returns an error if any '%' is not followed by two
	// hexadecimal digits, so we'll intentionally encode it.
	return !legalHeaderByte(b) || b == '%'
}

func headerKeyEscape(key string) string {
	buf := strings.Builder{}
	for i := 0; i < len(key); i++ {
		b := key[i]
		if shouldEscape(b) {
			// %-encode bytes that should be escaped:
			// https://tools.ietf.org/html/rfc3986#section-2.1
			fmt.Fprintf(&buf, "%%%02X", b)
			continue
		}
		buf.WriteByte(b)
	}
	return buf.String()
}

// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable.
// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators
var legalHeaderKeyBytes = [127]bool{
	'%':  true,
	'!':  true,
	'#':  true,
	'$':  true,
	'&':  true,
	'\'': true,
	'*':  true,
	'+':  true,
	'-':  true,
	'.':  true,
	'0':  true,
	'1':  true,
	'2':  true,
	'3':  true,
	'4':  true,
	'5':  true,
	'6':  true,
	'7':  true,
	'8':  true,
	'9':  true,
	'A':  true,
	'B':  true,
	'C':  true,
	'D':  true,
	'E':  true,
	'F':  true,
	'G':  true,
	'H':  true,
	'I':  true,
	'J':  true,
	'K':  true,
	'L':  true,
	'M':  true,
	'N':  true,
	'O':  true,
	'P':  true,
	'Q':  true,
	'R':  true,
	'S':  true,
	'T':  true,
	'U':  true,
	'W':  true,
	'V':  true,
	'X':  true,
	'Y':  true,
	'Z':  true,
	'^':  true,
	'_':  true,
	'`':  true,
	'a':  true,
	'b':  true,
	'c':  true,
	'd':  true,
	'e':  true,
	'f':  true,
	'g':  true,
	'h':  true,
	'i':  true,
	'j':  true,
	'k':  true,
	'l':  true,
	'm':  true,
	'n':  true,
	'o':  true,
	'p':  true,
	'q':  true,
	'r':  true,
	's':  true,
	't':  true,
	'u':  true,
	'v':  true,
	'w':  true,
	'x':  true,
	'y':  true,
	'z':  true,
	'|':  true,
	'~':  true,
}
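Since headerKeyEscape is unexported, here is a small stand-alone snippet that mirrors its percent-encoding so the behavior is easy to see (escapeKey and the toy legal set are illustrative only, not part of client-go):

package main

import (
	"fmt"
	"strings"
)

// escapeKey mirrors the unexported headerKeyEscape above: any byte outside
// a caller-supplied legal set (or '%' itself) is percent-encoded per RFC 3986.
func escapeKey(key string, legal func(byte) bool) string {
	var buf strings.Builder
	for i := 0; i < len(key); i++ {
		b := key[i]
		if !legal(b) || b == '%' {
			fmt.Fprintf(&buf, "%%%02X", b)
			continue
		}
		buf.WriteByte(b)
	}
	return buf.String()
}

func main() {
	// Toy legal set: ASCII letters, digits, '-' and '.'.
	legal := func(b byte) bool {
		return b >= 'a' && b <= 'z' || b >= 'A' && b <= 'Z' ||
			b >= '0' && b <= '9' || b == '-' || b == '.'
	}
	fmt.Println(escapeKey("role name/admin", legal)) // role%20name%2Fadmin
}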