Fix merge conflict, change flag to snapshot-mode
commit 5dc47258a0
Gopkg.lock

@@ -1,6 +1,20 @@
 # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

+[[projects]]
+  name = "cloud.google.com/go"
+  packages = [
+    "compute/metadata",
+    "iam",
+    "internal",
+    "internal/optional",
+    "internal/trace",
+    "internal/version",
+    "storage"
+  ]
+  revision = "4b98a6370e36d7a85192e7bad08a4ebd82eac2a8"
+  version = "v0.20.0"
+
 [[projects]]
   name = "github.com/BurntSushi/toml"
   packages = ["."]

@@ -163,9 +177,14 @@
     "builder/dockerfile/parser",
     "builder/dockerfile/shell",
     "client",
     "pkg/archive",
     "pkg/fileutils",
     "pkg/homedir",
     "pkg/idtools",
     "pkg/ioutils",
     "pkg/longpath",
     "pkg/mount",
     "pkg/pools",
     "pkg/system"
   ]
   revision = "b1a1234c60cf87048814aa37da523b03a7b0d344"

@@ -222,10 +241,23 @@
 [[projects]]
   name = "github.com/golang/protobuf"
-  packages = ["proto"]
+  packages = [
+    "proto",
+    "protoc-gen-go/descriptor",
+    "ptypes",
+    "ptypes/any",
+    "ptypes/duration",
+    "ptypes/timestamp"
+  ]
   revision = "925541529c1fa6821df4e44ce2723319eb2be768"
   version = "v1.0.0"

 [[projects]]
   name = "github.com/googleapis/gax-go"
   packages = ["."]
   revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
   version = "v2.0.0"

 [[projects]]
   name = "github.com/gorilla/context"
   packages = ["."]

@@ -407,6 +439,24 @@
   revision = "38ec4ddb06dedbea0a895c4848b248eb38af221b"
   version = "v0.10.2"

+[[projects]]
+  name = "go.opencensus.io"
+  packages = [
+    "exporter/stackdriver/propagation",
+    "internal",
+    "internal/tagencoding",
+    "plugin/ochttp",
+    "plugin/ochttp/propagation/b3",
+    "stats",
+    "stats/internal",
+    "stats/view",
+    "tag",
+    "trace",
+    "trace/propagation"
+  ]
+  revision = "6e3f034057826b530038d93267906ec3c012183f"
+  version = "v0.6.0"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"

@@ -431,11 +481,25 @@
     "http2",
     "http2/hpack",
     "idna",
     "internal/timeseries",
     "lex/httplex",
-    "proxy"
+    "proxy",
+    "trace"
   ]
   revision = "07e8617a6db2368fa55d4616f371ee1b1403c817"

+[[projects]]
+  branch = "master"
+  name = "golang.org/x/oauth2"
+  packages = [
+    ".",
+    "google",
+    "internal",
+    "jws",
+    "jwt"
+  ]
+  revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"

@@ -466,6 +530,80 @@
   revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
   version = "v0.3.0"

+[[projects]]
+  branch = "master"
+  name = "google.golang.org/api"
+  packages = [
+    "gensupport",
+    "googleapi",
+    "googleapi/internal/uritemplates",
+    "googleapi/transport",
+    "internal",
+    "iterator",
+    "option",
+    "storage/v1",
+    "transport/http"
+  ]
+  revision = "e4126357c891acdef6dcd7805daa4c6533be6544"
+
+[[projects]]
+  name = "google.golang.org/appengine"
+  packages = [
+    ".",
+    "internal",
+    "internal/app_identity",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/modules",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "urlfetch"
+  ]
+  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
+  version = "v1.0.0"
+
+[[projects]]
+  branch = "master"
+  name = "google.golang.org/genproto"
+  packages = [
+    "googleapis/api/annotations",
+    "googleapis/iam/v1",
+    "googleapis/rpc/code",
+    "googleapis/rpc/status"
+  ]
+  revision = "ab0870e398d5dd054b868c0db1481ab029b9a9f2"
+
+[[projects]]
+  name = "google.golang.org/grpc"
+  packages = [
+    ".",
+    "balancer",
+    "balancer/base",
+    "balancer/roundrobin",
+    "codes",
+    "connectivity",
+    "credentials",
+    "encoding",
+    "encoding/proto",
+    "grpclb/grpc_lb_v1/messages",
+    "grpclog",
+    "internal",
+    "keepalive",
+    "metadata",
+    "naming",
+    "peer",
+    "resolver",
+    "resolver/dns",
+    "resolver/passthrough",
+    "stats",
+    "status",
+    "tap",
+    "transport"
+  ]
+  revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
+  version = "v1.10.0"
+
 [[projects]]
   name = "gopkg.in/cheggaaa/pb.v1"
   packages = ["."]

@@ -487,6 +625,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "eadec1feacc8473e54622d5f3a25fbc9c7fb1f9bd38776475c3e2d283bd80d2a"
+  inputs-digest = "46a5e132ee4e288f54fbe425376c80ce19d01bce195ad56d0ff74602ace9402f"
   solver-name = "gps-cdcl"
   solver-version = 1
Makefile

@@ -35,7 +35,7 @@ EXECUTOR_PACKAGE = $(REPOPATH)/executor
 KBUILD_PACKAGE = $(REPOPATH)/kbuild

 out/executor: $(GO_FILES)
-	GOOS=$* GOARCH=$(GOARCH) CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -tags $(GO_BUILD_TAGS) -o $@ $(EXECUTOR_PACKAGE)
+	GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -tags $(GO_BUILD_TAGS) -o $@ $(EXECUTOR_PACKAGE)

 out/kbuild: $(GO_FILES)
@@ -21,3 +21,4 @@ ADD files/docker-credential-gcr /usr/local/bin/
 ADD files/config.json /root/.docker/
 ENV HOME /root
+ENV PATH /usr/local/bin
 ENTRYPOINT ["/kbuild/executor"]
@@ -23,25 +23,29 @@ import (
 	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/image"
 	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/snapshot"
 	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 )

 var (
 	dockerfilePath string
 	destination    string
 	srcContext     string
-	mtime          string
+	snapshotMode   string
 	bucket         string
 	logLevel       string
 )

 func init() {
-	RootCmd.PersistentFlags().StringVarP(&dockerfilePath, "dockerfile", "f", "/workspace/Dockerfile", "Path to the dockerfile to be built.")
+	RootCmd.PersistentFlags().StringVarP(&dockerfilePath, "dockerfile", "f", "Dockerfile", "Path to the dockerfile to be built.")
 	RootCmd.PersistentFlags().StringVarP(&srcContext, "context", "c", "", "Path to the dockerfile build context.")
 	RootCmd.PersistentFlags().StringVarP(&bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
 	RootCmd.PersistentFlags().StringVarP(&destination, "destination", "d", "", "Registry the final image should be pushed to (ex: gcr.io/test/example:latest)")
-	RootCmd.PersistentFlags().StringVarP(&mtime, "mtime", "", "full", "Set this flag to change the file attributes inspected during snapshotting")
+	RootCmd.PersistentFlags().StringVarP(&snapshotMode, "snapshotMode", "", "full", "Set this flag to change the file attributes inspected during snapshotting")
 	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic)")
 }

@@ -51,6 +55,10 @@ var RootCmd = &cobra.Command{
 		return util.SetLogLevel(logLevel)
 	},
 	Run: func(cmd *cobra.Command, args []string) {
+		if err := resolveSourceContext(); err != nil {
+			logrus.Error(err)
+			os.Exit(1)
+		}
 		if err := execute(); err != nil {
 			logrus.Error(err)
 			os.Exit(1)

@@ -58,6 +66,33 @@ var RootCmd = &cobra.Command{
 	},
 }

+// resolveSourceContext unpacks the source context if it is a tar in a GCS bucket;
+// it resets srcContext to be the path to the unpacked build context within the image.
+func resolveSourceContext() error {
+	if srcContext == "" && bucket == "" {
+		return errors.New("please specify a path to the build context with the --context flag or a GCS bucket with the --bucket flag")
+	}
+	if srcContext != "" && bucket != "" {
+		return errors.New("please specify either --bucket or --context as the desired build context")
+	}
+	if srcContext != "" {
+		return nil
+	}
+	logrus.Infof("Using GCS bucket %s as source context", bucket)
+	buildContextPath := constants.BuildContextDir
+	if err := util.UnpackTarFromGCSBucket(bucket, buildContextPath); err != nil {
+		return err
+	}
+	logrus.Debugf("Unpacked tar from %s to path %s", bucket, buildContextPath)
+	srcContext = buildContextPath
+	// If the path to the dockerfile doesn't exist, assume it is in the unpacked tar
+	if !util.FilepathExists(dockerfilePath) {
+		logrus.Debugf("Expecting dockerfile to be located at %s within the tar build context", dockerfilePath)
+		dockerfilePath = filepath.Join(srcContext, dockerfilePath)
+	}
+	return nil
+}
+
 func execute() error {
 	// Parse dockerfile and unpack base image to root
 	d, err := ioutil.ReadFile(dockerfilePath)

@@ -125,13 +160,31 @@ func execute() error {
 		}
 	}
 	// Push the image
+	if err := setDefaultEnv(); err != nil {
+		return err
+	}
 	return image.PushImage(sourceImage, destination)
 }

 func getHasher() func(string) (string, error) {
-	if mtime == "time" {
+	if snapshotMode == "time" {
 		logrus.Info("Only file modification time will be considered when snapshotting")
 		return util.MtimeHasher()
 	}
 	return util.Hasher()
 }

+// setDefaultEnv sets default values for HOME and PATH so that
+// config.json and docker-credential-gcr can be accessed
+func setDefaultEnv() error {
+	defaultEnvs := map[string]string{
+		"HOME": "/root",
+		"PATH": "/usr/local/bin/",
+	}
+	for key, val := range defaultEnvs {
+		if err := os.Setenv(key, val); err != nil {
+			return err
+		}
+	}
+	return nil
+}
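For orientation on the renamed flag: --snapshotMode selects which file attributes getHasher feeds into the snapshotter. The util.Hasher and util.MtimeHasher implementations are not part of this diff, so the sketch below is an assumption about their shape, not the actual code:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

// mtimeHasher mirrors what snapshotMode=time plausibly does: hash only the
// file's modification time, so content edits that preserve mtime are missed.
func mtimeHasher() func(string) (string, error) {
	return func(p string) (string, error) {
		fi, err := os.Stat(p)
		if err != nil {
			return "", err
		}
		return fi.ModTime().UTC().String(), nil
	}
}

// fullHasher mirrors snapshotMode=full: hash the file contents themselves.
func fullHasher() func(string) (string, error) {
	return func(p string) (string, error) {
		f, err := os.Open(p)
		if err != nil {
			return "", err
		}
		defer f.Close()
		h := sha256.New()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
		return hex.EncodeToString(h.Sum(nil)), nil
	}
}

func main() {
	h := mtimeHasher()
	fmt.Println(h(os.Args[0]))
	h = fullHasher()
	fmt.Println(h(os.Args[0]))
}

The trade-off is speed against accuracy: the mtime-only mode snapshots faster but can miss changes, which is why "full" stays the default.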
@@ -0,0 +1 @@
normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,18 @@
FROM gcr.io/google-appengine/debian9:latest
# First, try adding some regular files
ADD context/foo foo
ADD context/foo /foodir/
ADD context/bar/b* bar/
ADD . newdir
ADD ["context/foo", "/tmp/foo" ]

# Next, make sure environment replacement works
ENV contextenv ./context
ADD $contextenv/* /tmp/${contextenv}/

# Now, test extracting local tar archives
ADD context/tars/fil* /tars/
ADD context/tars/file.tar /tars_again

# Finally, test adding a remote URL, concurrently with a normal file
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/
@@ -12,3 +12,9 @@ COPY ["context/foo", "/tmp/foo" ]
 COPY context/b* /baz/
 COPY context/foo context/bar/ba? /test/
 COPY context/arr[[]0].txt /mydir/
 COPY context/bar/bat .

 ENV contextenv ./context
 COPY ${contextenv}/foo /tmp/foo2
 COPY $contextenv/foo /tmp/foo3
 COPY $contextenv/* /tmp/${contextenv}/
@@ -0,0 +1,19 @@
# Copyright 2018 Google, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM gcr.io/google-appengine/debian9
RUN useradd testuser
RUN groupadd testgroup
USER testuser:testgroup
RUN echo "hey" > /tmp/foo
@@ -0,0 +1,13 @@
FROM gcr.io/google-appengine/debian9:latest
COPY context/foo foo
WORKDIR /test
# Test that this will be appended on to the previous command, to create /test/workdir
WORKDIR workdir
COPY context/foo ./currentfoo
# Test that the RUN command will happen in the correct directory
RUN cp currentfoo newfoo
WORKDIR /new/dir
ENV dir /another/new/dir
WORKDIR $dir/newdir
WORKDIR $dir/$doesntexist
WORKDIR /
@@ -0,0 +1,12 @@
[
  {
    "Image1": "gcr.io/kbuild-test/docker-test-add:latest",
    "Image2": "gcr.io/kbuild-test/kbuild-test-add:latest",
    "DiffType": "File",
    "Diff": {
      "Adds": null,
      "Dels": null,
      "Mods": null
    }
  }
]
@@ -0,0 +1,12 @@
[
  {
    "Image1": "gcr.io/kbuild-test/docker-test-bucket-buildcontext:latest",
    "Image2": "gcr.io/kbuild-test/kbuild-test-bucket-buildcontext:latest",
    "DiffType": "File",
    "Diff": {
      "Adds": null,
      "Dels": null,
      "Mods": null
    }
  }
]
@@ -14,8 +14,8 @@
   },
   {
     "Name": "/var/log/apt/term.log",
-    "Size1": 24421,
-    "Size2": 24421
+    "Size1": 23671,
+    "Size2": 23671
   },
   {
     "Name": "/var/cache/ldconfig/aux-cache",

@@ -24,8 +24,8 @@
   },
   {
     "Name": "/var/log/apt/history.log",
-    "Size1": 5415,
-    "Size2": 5415
+    "Size1": 5661,
+    "Size2": 5661
   },
   {
     "Name": "/var/log/alternatives.log",
@@ -0,0 +1,12 @@
[
  {
    "Image1": "gcr.io/kbuild-test/docker-test-workdir:latest",
    "Image2": "gcr.io/kbuild-test/kbuild-test-workdir:latest",
    "DiffType": "File",
    "Diff": {
      "Adds": null,
      "Dels": null,
      "Mods": null
    }
  }
]
@@ -0,0 +1,15 @@
schemaVersion: '2.0.0'
commandTests:
- name: 'whoami'
  command: 'whoami'
  expectedOutput: ['testuser']
  excludedOutput: ['root']
- name: 'file owner'
  command: 'ls'
  args: ['-l', '/tmp/foo']
  expectedOutput: ['.*testuser.*', '.*testgroup.*']
  excludedOutput: ['.*root.*']
fileContentTests:
- name: "/tmp/foo"
  path: "/tmp/foo"
  expectedContent: ["hey"]
@@ -22,44 +22,89 @@ import (
 	"gopkg.in/yaml.v2"
 )

+const (
+	executorImage           = "executor-image"
+	dockerImage             = "gcr.io/cloud-builders/docker"
+	ubuntuImage             = "ubuntu"
+	testRepo                = "gcr.io/kbuild-test/"
+	dockerPrefix            = "docker-"
+	kbuildPrefix            = "kbuild-"
+	daemonPrefix            = "daemon://"
+	containerDiffOutputFile = "container-diff.json"
+	kbuildTestBucket        = "kbuild-test-bucket"
+	buildcontextPath        = "/workspace/integration_tests"
+	dockerfilesPath         = "/workspace/integration_tests/dockerfiles"
+)
+
 var fileTests = []struct {
-	description    string
-	dockerfilePath string
-	configPath     string
-	context        string
-	repo           string
-	mtime          string
+	description         string
+	dockerfilePath      string
+	configPath          string
+	dockerContext       string
+	kbuildContext       string
+	kbuildContextBucket bool
+	repo                string
+	snapshotMode        string
 }{
 	{
 		description:    "test extract filesystem",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_extract_fs",
 		configPath:     "/workspace/integration_tests/dockerfiles/config_test_extract_fs.json",
-		context:        "integration_tests/dockerfiles/",
+		dockerContext:  dockerfilesPath,
+		kbuildContext:  dockerfilesPath,
 		repo:           "extract-filesystem",
-		mtime:          "time",
+		snapshotMode:   "time",
 	},
 	{
 		description:    "test run",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_run",
 		configPath:     "/workspace/integration_tests/dockerfiles/config_test_run.json",
-		context:        "integration_tests/dockerfiles/",
+		dockerContext:  dockerfilesPath,
+		kbuildContext:  dockerfilesPath,
 		repo:           "test-run",
 	},
 	{
 		description:    "test run no files changed",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_run_2",
 		configPath:     "/workspace/integration_tests/dockerfiles/config_test_run_2.json",
-		context:        "integration_tests/dockerfiles/",
+		dockerContext:  dockerfilesPath,
+		kbuildContext:  dockerfilesPath,
 		repo:           "test-run-2",
-		mtime:          "time",
+		snapshotMode:   "time",
 	},
 	{
 		description:    "test copy",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_copy",
 		configPath:     "/workspace/integration_tests/dockerfiles/config_test_copy.json",
-		context:        "/workspace/integration_tests/",
+		dockerContext:  buildcontextPath,
+		kbuildContext:  buildcontextPath,
 		repo:           "test-copy",
-		mtime:          "time",
+		snapshotMode:   "time",
 	},
+	{
+		description:         "test bucket build context",
+		dockerfilePath:      "/workspace/integration_tests/dockerfiles/Dockerfile_test_copy",
+		configPath:          "/workspace/integration_tests/dockerfiles/config_test_bucket_buildcontext.json",
+		dockerContext:       buildcontextPath,
+		kbuildContext:       kbuildTestBucket,
+		kbuildContextBucket: true,
+		repo:                "test-bucket-buildcontext",
+	},
+	{
+		description:    "test workdir",
+		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_workdir",
+		configPath:     "/workspace/integration_tests/dockerfiles/config_test_workdir.json",
+		dockerContext:  buildcontextPath,
+		kbuildContext:  buildcontextPath,
+		repo:           "test-workdir",
+	},
+	{
+		description:    "test add",
+		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_add",
+		configPath:     "/workspace/integration_tests/dockerfiles/config_test_add.json",
+		dockerContext:  buildcontextPath,
+		kbuildContext:  buildcontextPath,
+		repo:           "test-add",
+	},
 }

@@ -68,22 +113,33 @@ var structureTests = []struct {
 	dockerfilePath        string
 	structureTestYamlPath string
 	dockerBuildContext    string
+	kbuildContext         string
 	repo                  string
 }{
 	{
 		description:    "test env",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_env",
 		repo:           "test-env",
-		dockerBuildContext: "/workspace/integration_tests/dockerfiles/",
+		dockerBuildContext:    dockerfilesPath,
+		kbuildContext:         dockerfilesPath,
 		structureTestYamlPath: "/workspace/integration_tests/dockerfiles/test_env.yaml",
 	},
 	{
 		description:    "test metadata",
 		dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_metadata",
 		repo:           "test-metadata",
-		dockerBuildContext: "/workspace/integration_tests/dockerfiles/",
+		dockerBuildContext:    dockerfilesPath,
+		kbuildContext:         dockerfilesPath,
 		structureTestYamlPath: "/workspace/integration_tests/dockerfiles/test_metadata.yaml",
 	},
+	{
+		description:           "test user command",
+		dockerfilePath:        "/workspace/integration_tests/dockerfiles/Dockerfile_test_user_run",
+		repo:                  "test-user",
+		dockerBuildContext:    dockerfilesPath,
+		kbuildContext:         dockerfilesPath,
+		structureTestYamlPath: "/workspace/integration_tests/dockerfiles/test_user.yaml",
+	},
 }

 type step struct {

@@ -96,16 +152,6 @@ type testyaml struct {
 	Steps []step
 }

-var executorImage = "executor-image"
-var executorCommand = "/kbuild/executor"
-var dockerImage = "gcr.io/cloud-builders/docker"
-var ubuntuImage = "ubuntu"
-var testRepo = "gcr.io/kbuild-test/"
-var dockerPrefix = "docker-"
-var kbuildPrefix = "kbuild-"
-var daemonPrefix = "daemon://"
-var containerDiffOutputFile = "container-diff.json"
-
 func main() {

 	// First, copy container-diff in

@@ -125,31 +171,45 @@ func main() {
 		Name: ubuntuImage,
 		Args: []string{"chmod", "+x", "container-structure-test"},
 	}

+	GCSBucketTarBuildContext := step{
+		Name: ubuntuImage,
+		Args: []string{"tar", "-C", "/workspace/integration_tests/", "-zcvf", "/workspace/context.tar.gz", "."},
+	}
+	uploadTarBuildContext := step{
+		Name: "gcr.io/cloud-builders/gsutil",
+		Args: []string{"cp", "/workspace/context.tar.gz", "gs://kbuild-test-bucket/"},
+	}
+
 	// Build executor image
 	buildExecutorImage := step{
 		Name: dockerImage,
 		Args: []string{"build", "-t", executorImage, "-f", "deploy/Dockerfile", "."},
 	}
 	y := testyaml{
-		Steps: []step{containerDiffStep, containerDiffPermissions, structureTestsStep, structureTestPermissions, buildExecutorImage},
+		Steps: []step{containerDiffStep, containerDiffPermissions, structureTestsStep, structureTestPermissions, GCSBucketTarBuildContext, uploadTarBuildContext, buildExecutorImage},
 	}
 	for _, test := range fileTests {
 		// First, build the image with docker
 		dockerImageTag := testRepo + dockerPrefix + test.repo
 		dockerBuild := step{
 			Name: dockerImage,
-			Args: []string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.context},
+			Args: []string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.dockerContext},
 		}

 		// Then, build the image with kbuild
 		kbuildImage := testRepo + kbuildPrefix + test.repo
-		mtime := ""
-		if test.mtime != "" {
-			mtime = "--mtime=" + test.mtime
+		snapshotMode := ""
+		if test.snapshotMode != "" {
+			snapshotMode = "--snapshotMode=" + test.snapshotMode
 		}
+		contextFlag := "--context"
+		if test.kbuildContextBucket {
+			contextFlag = "--bucket"
+		}
 		kbuild := step{
 			Name: executorImage,
-			Args: []string{executorCommand, "--destination", kbuildImage, "--dockerfile", test.dockerfilePath, "--context", test.context, mtime},
+			Args: []string{"--destination", kbuildImage, "--dockerfile", test.dockerfilePath, contextFlag, test.kbuildContext, snapshotMode},
 		}

 		// Pull the kbuild image

@@ -193,7 +253,7 @@ func main() {
 	kbuildImage := testRepo + kbuildPrefix + test.repo
 	kbuild := step{
 		Name: executorImage,
-		Args: []string{executorCommand, "--destination", kbuildImage, "--dockerfile", test.dockerfilePath},
+		Args: []string{"--destination", kbuildImage, "--dockerfile", test.dockerfilePath, "--context", test.kbuildContext},
 	}
 	// Pull the kbuild image
 	pullKbuildImage := step{
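The steps above are marshaled into a cloud build config via gopkg.in/yaml.v2. As a self-contained sketch of the output, with step values taken from this diff (the surrounding main is hypothetical):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type step struct {
	Name string
	Args []string
}

type testyaml struct {
	Steps []step
}

func main() {
	// One kbuild build step, as constructed in the loop above.
	kbuild := step{
		Name: "executor-image",
		Args: []string{"--destination", "gcr.io/kbuild-test/kbuild-test-copy",
			"--dockerfile", "/workspace/integration_tests/dockerfiles/Dockerfile_test_copy",
			"--context", "/workspace/integration_tests", "--snapshotMode=time"},
	}
	out, err := yaml.Marshal(testyaml{Steps: []step{kbuild}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}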
@@ -0,0 +1,140 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package commands

import (
	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
	"github.com/containers/image/manifest"
	"github.com/docker/docker/builder/dockerfile/instructions"
	"github.com/sirupsen/logrus"
	"path/filepath"
	"strings"
)

type AddCommand struct {
	cmd           *instructions.AddCommand
	buildcontext  string
	snapshotFiles []string
}

// ExecuteCommand executes the ADD command
// Special stuff about ADD:
// 1. If <src> is a remote file URL:
//    - destination will have permissions of 0600
//    - If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
//    - If dest doesn't end with a slash, the filepath is inferred to be <dest>/<filename>
// 2. If <src> is a local tar archive:
//    - it is unpacked at the dest, as 'tar -x' would
func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error {
	srcs := a.cmd.SourcesAndDest[:len(a.cmd.SourcesAndDest)-1]
	dest := a.cmd.SourcesAndDest[len(a.cmd.SourcesAndDest)-1]

	logrus.Infof("cmd: Add %s", srcs)
	logrus.Infof("dest: %s", dest)

	// First, resolve any environment replacement
	resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, config.Env, true)
	if err != nil {
		return err
	}
	dest = resolvedEnvs[len(resolvedEnvs)-1]
	// Get a map of [src]:[files rooted at src]
	srcMap, err := util.ResolveSources(resolvedEnvs, a.buildcontext)
	if err != nil {
		return err
	}
	// If any of the sources are local tar archives:
	// 1. Unpack them to the specified destination
	// 2. Remove it as a source that needs to be copied over
	// If any of the sources is a remote file URL:
	// 1. Download and copy it to the specified dest
	// 2. Remove it as a source that needs to be copied
	for src, files := range srcMap {
		for _, file := range files {
			// If file is a local tar archive, then we unpack it to dest
			filePath := filepath.Join(a.buildcontext, file)
			isFilenameSource, err := isFilenameSource(srcMap, file)
			if err != nil {
				return err
			}
			if util.IsSrcRemoteFileURL(file) {
				urlDest := util.URLDestinationFilepath(file, dest, config.WorkingDir)
				logrus.Infof("Adding remote URL %s to %s", file, urlDest)
				if err := util.DownloadFileToDest(file, urlDest); err != nil {
					return err
				}
				a.snapshotFiles = append(a.snapshotFiles, urlDest)
				delete(srcMap, src)
			} else if isFilenameSource && util.IsFileLocalTarArchive(filePath) {
				logrus.Infof("Unpacking local tar archive %s to %s", file, dest)
				if err := util.UnpackLocalTarArchive(filePath, dest); err != nil {
					return err
				}
				// Add the unpacked files to the snapshotter
				filesAdded, err := util.Files(dest)
				if err != nil {
					return err
				}
				logrus.Debugf("Added %v from local tar archive %s", filesAdded, file)
				a.snapshotFiles = append(a.snapshotFiles, filesAdded...)
				delete(srcMap, src)
			}
		}
	}
	// With the remaining "normal" sources, create and execute a standard copy command
	if len(srcMap) == 0 {
		return nil
	}
	var regularSrcs []string
	for src := range srcMap {
		regularSrcs = append(regularSrcs, src)
	}
	copyCmd := CopyCommand{
		cmd: &instructions.CopyCommand{
			SourcesAndDest: append(regularSrcs, dest),
		},
		buildcontext: a.buildcontext,
	}
	if err := copyCmd.ExecuteCommand(config); err != nil {
		return err
	}
	a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...)
	return nil
}

func isFilenameSource(srcMap map[string][]string, fileName string) (bool, error) {
	for src := range srcMap {
		matched, err := filepath.Match(src, fileName)
		if err != nil {
			return false, err
		}
		if matched || (src == fileName) {
			return true, nil
		}
	}
	return false, nil
}

// FilesToSnapshot should return an empty array if still nil; no files were changed
func (a *AddCommand) FilesToSnapshot() []string {
	return a.snapshotFiles
}

// CreatedBy returns some information about the command for the image config
func (a *AddCommand) CreatedBy() string {
	return strings.Join(a.cmd.SourcesAndDest, " ")
}
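One subtlety in isFilenameSource: filepath.Match treats bracket characters in a source as a pattern, so a literal name containing brackets can fail to match itself, which is why the src == fileName fallback exists. A standalone illustration (the inputs are hypothetical):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// A glob source matches the concrete files it expanded to.
	m, _ := filepath.Match("context/tars/fil*", "context/tars/file.tar")
	fmt.Println(m) // true

	// A literal name with brackets is parsed as a character class and
	// fails to match itself...
	m, _ = filepath.Match("a[0].txt", "a[0].txt")
	fmt.Println(m) // false

	// ...so isFilenameSource also compares src == fileName directly.
	fmt.Println("a[0].txt" == "a[0].txt") // true
}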
@@ -44,12 +44,18 @@ func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, e
 		return &ExposeCommand{cmd: c}, nil
 	case *instructions.EnvCommand:
 		return &EnvCommand{cmd: c}, nil
+	case *instructions.WorkdirCommand:
+		return &WorkdirCommand{cmd: c}, nil
+	case *instructions.AddCommand:
+		return &AddCommand{cmd: c, buildcontext: buildcontext}, nil
 	case *instructions.CmdCommand:
 		return &CmdCommand{cmd: c}, nil
 	case *instructions.EntrypointCommand:
 		return &EntrypointCommand{cmd: c}, nil
 	case *instructions.LabelCommand:
 		return &LabelCommand{cmd: c}, nil
+	case *instructions.UserCommand:
+		return &UserCommand{cmd: c}, nil
 	}
 	return nil, errors.Errorf("%s is not a supported command", cmd.Name())
 }
@@ -39,8 +39,14 @@ func (c *CopyCommand) ExecuteCommand(config *manifest.Schema2Config) error {
 	logrus.Infof("cmd: copy %s", srcs)
 	logrus.Infof("dest: %s", dest)

+	// First, resolve any environment replacement
+	resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.cmd.SourcesAndDest, config.Env, true)
+	if err != nil {
+		return err
+	}
+	dest = resolvedEnvs[len(resolvedEnvs)-1]
 	// Get a map of [src]:[files rooted at src]
-	srcMap, err := util.ResolveSources(c.cmd.SourcesAndDest, c.buildcontext)
+	srcMap, err := util.ResolveSources(resolvedEnvs, c.buildcontext)
 	if err != nil {
 		return err
 	}
@@ -17,11 +17,9 @@ limitations under the License.
 package commands

 import (
-	"bytes"
+	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
 	"github.com/containers/image/manifest"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/docker/docker/builder/dockerfile/parser"
-	"github.com/docker/docker/builder/dockerfile/shell"
 	"github.com/sirupsen/logrus"
 	"os"
 	"strings"

@@ -33,26 +31,18 @@ type EnvCommand struct {

 func (e *EnvCommand) ExecuteCommand(config *manifest.Schema2Config) error {
 	logrus.Info("cmd: ENV")
-	// The dockerfile/shell package handles processing env values
-	// It handles escape characters and supports expansion from the config.Env array
-	// Shlex handles some of the following use cases (these and more are tested in integration tests)
-	// ""a'b'c"" -> "a'b'c"
-	// "Rex\ The\ Dog \" -> "Rex The Dog"
-	// "a\"b" -> "a"b"
-	envString := envToString(e.cmd)
-	p, err := parser.Parse(bytes.NewReader([]byte(envString)))
-	if err != nil {
-		return err
-	}
-	shlex := shell.NewLex(p.EscapeToken)
 	newEnvs := e.cmd.Env
 	for index, pair := range newEnvs {
-		expandedValue, err := shlex.ProcessWord(pair.Value, config.Env)
+		expandedKey, err := util.ResolveEnvironmentReplacement(pair.Key, config.Env, false)
 		if err != nil {
 			return err
 		}
+		expandedValue, err := util.ResolveEnvironmentReplacement(pair.Value, config.Env, false)
+		if err != nil {
+			return err
+		}
 		newEnvs[index] = instructions.KeyValuePair{
-			Key:   pair.Key,
+			Key:   expandedKey,
 			Value: expandedValue,
 		}
 		logrus.Infof("Setting environment variable %s=%s", pair.Key, expandedValue)

@@ -98,14 +88,6 @@ Loop:
 	return nil
 }

-func envToString(cmd *instructions.EnvCommand) string {
-	env := []string{"ENV"}
-	for _, kvp := range cmd.Env {
-		env = append(env, kvp.Key+"="+kvp.Value)
-	}
-	return strings.Join(env, " ")
-}
-
 // We know that no files have changed, so return an empty array
 func (e *EnvCommand) FilesToSnapshot() []string {
 	return []string{}
@@ -53,21 +53,39 @@ func TestUpdateEnvConfig(t *testing.T) {
 	updateConfigEnv(newEnvs, cfg)
 	testutil.CheckErrorAndDeepEqual(t, false, nil, expectedEnvArray, cfg.Env)
 }

-func TestEnvToString(t *testing.T) {
-	envCmd := &instructions.EnvCommand{
-		Env: []instructions.KeyValuePair{
-			{
-				Key:   "PATH",
-				Value: "/some/path",
-			},
-			{
-				Key:   "HOME",
-				Value: "/root",
-			},
-		},
-	}
-	expectedString := "ENV PATH=/some/path HOME=/root"
-	actualString := envToString(envCmd)
-	testutil.CheckErrorAndDeepEqual(t, false, nil, expectedString, actualString)
+func Test_EnvExecute(t *testing.T) {
+	cfg := &manifest.Schema2Config{
+		Env: []string{
+			"path=/usr/",
+			"home=/root",
+		},
+	}
+
+	envCmd := &EnvCommand{
+		&instructions.EnvCommand{
+			Env: []instructions.KeyValuePair{
+				{
+					Key:   "path",
+					Value: "/some/path",
+				},
+				{
+					Key:   "HOME",
+					Value: "$home",
+				},
+				{
+					Key:   "$path",
+					Value: "$home/",
+				},
+			},
+		},
+	}
+	expectedEnvs := []string{
+		"path=/some/path",
+		"home=/root",
+		"HOME=/root",
+		"/usr/=/root/",
+	}
+	err := envCmd.ExecuteCommand(cfg)
+	testutil.CheckErrorAndDeepEqual(t, false, err, expectedEnvs, cfg.Env)
 }
@@ -18,6 +18,7 @@ package commands

 import (
 	"fmt"
+	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
 	"github.com/containers/image/manifest"
 	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/sirupsen/logrus"

@@ -29,25 +30,16 @@ type ExposeCommand struct {
 }

 func (r *ExposeCommand) ExecuteCommand(config *manifest.Schema2Config) error {
-	return updateExposedPorts(r.cmd.Ports, config)
-}
-
-func validProtocol(protocol string) bool {
-	validProtocols := [2]string{"tcp", "udp"}
-	for _, p := range validProtocols {
-		if protocol == p {
-			return true
-		}
-	}
-	return false
-}
-
-func updateExposedPorts(ports []string, config *manifest.Schema2Config) error {
 	logrus.Info("cmd: EXPOSE")
 	// Grab the currently exposed ports
 	existingPorts := config.ExposedPorts

 	// Add any new ones in
-	for _, p := range ports {
+	for _, p := range r.cmd.Ports {
+		// Resolve any environment variables
+		p, err := util.ResolveEnvironmentReplacement(p, config.Env, false)
+		if err != nil {
+			return err
+		}
 		// Add the default protocol if one isn't specified
 		if !strings.Contains(p, "/") {
 			p = p + "/tcp"

@@ -64,6 +56,16 @@ func updateExposedPorts(ports []string, config *manifest.Schema2Config) error {
 	return nil
 }

+func validProtocol(protocol string) bool {
+	validProtocols := [2]string{"tcp", "udp"}
+	for _, p := range validProtocols {
+		if protocol == p {
+			return true
+		}
+	}
+	return false
+}
+
 func (r *ExposeCommand) FilesToSnapshot() []string {
 	return []string{}
 }
@@ -19,6 +19,7 @@ package commands
 import (
 	"github.com/GoogleCloudPlatform/k8s-container-builder/testutil"
 	"github.com/containers/image/manifest"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"testing"
 )

@@ -27,6 +28,10 @@ func TestUpdateExposedPorts(t *testing.T) {
 		ExposedPorts: manifest.Schema2PortSet{
 			"8080/tcp": {},
 		},
+		Env: []string{
+			"port=udp",
+			"num=8085",
+		},
 	}

 	ports := []string{

@@ -34,6 +39,15 @@ func TestUpdateExposedPorts(t *testing.T) {
 		"8081/tcp",
 		"8082",
 		"8083/udp",
+		"8084/$port",
+		"$num",
+		"$num/$port",
 	}

+	exposeCmd := &ExposeCommand{
+		&instructions.ExposeCommand{
+			Ports: ports,
+		},
+	}
+
 	expectedPorts := manifest.Schema2PortSet{

@@ -41,9 +55,12 @@ func TestUpdateExposedPorts(t *testing.T) {
 		"8081/tcp": {},
 		"8082/tcp": {},
 		"8083/udp": {},
+		"8084/udp": {},
+		"8085/tcp": {},
+		"8085/udp": {},
 	}

-	err := updateExposedPorts(ports, cfg)
+	err := exposeCmd.ExecuteCommand(cfg)
 	testutil.CheckErrorAndDeepEqual(t, false, err, expectedPorts, cfg.ExposedPorts)
 }

@@ -56,6 +73,12 @@ func TestInvalidProtocol(t *testing.T) {
 		"80/garbage",
 	}

-	err := updateExposedPorts(ports, cfg)
+	exposeCmd := &ExposeCommand{
+		&instructions.ExposeCommand{
+			Ports: ports,
+		},
+	}
+
+	err := exposeCmd.ExecuteCommand(cfg)
 	testutil.CheckErrorAndDeepEqual(t, true, err, nil, nil)
 }
@@ -17,9 +17,9 @@ limitations under the License.
 package commands

 import (
+	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
 	"github.com/containers/image/manifest"
 	"github.com/docker/docker/builder/dockerfile/instructions"
-	"github.com/docker/docker/builder/dockerfile/shell"
 	"github.com/sirupsen/logrus"
 	"strings"
 )

@@ -29,6 +29,7 @@ type LabelCommand struct {
 }

 func (r *LabelCommand) ExecuteCommand(config *manifest.Schema2Config) error {
+	logrus.Info("cmd: LABEL")
 	return updateLabels(r.cmd.Labels, config)
 }

@@ -36,9 +37,8 @@ func updateLabels(labels []instructions.KeyValuePair, config *manifest.Schema2Co
 	existingLabels := config.Labels

 	// Let's unescape values before setting the label
-	shlex := shell.NewLex('\\')
 	for index, kvp := range labels {
-		unescaped, err := shlex.ProcessWord(kvp.Value, []string{})
+		unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, []string{}, false)
 		if err != nil {
 			return err
 		}
@@ -22,7 +22,9 @@ import (
 	"github.com/sirupsen/logrus"
 	"os"
 	"os/exec"
+	"strconv"
+	"strings"
 	"syscall"
 )

 type RunCommand struct {

@@ -44,7 +46,28 @@ func (r *RunCommand) ExecuteCommand(config *manifest.Schema2Config) error {
 	logrus.Infof("args: %s", newCommand[1:])

 	cmd := exec.Command(newCommand[0], newCommand[1:]...)
 	cmd.Dir = config.WorkingDir
 	cmd.Stdout = os.Stdout
+	// If specified, run the command as a specific user
+	if config.User != "" {
+		userAndGroup := strings.Split(config.User, ":")
+		// uid and gid need to be uint32
+		uid64, err := strconv.ParseUint(userAndGroup[0], 10, 32)
+		if err != nil {
+			return err
+		}
+		uid := uint32(uid64)
+		var gid uint32
+		if len(userAndGroup) > 1 {
+			gid64, err := strconv.ParseUint(userAndGroup[1], 10, 32)
+			if err != nil {
+				return err
+			}
+			gid = uint32(gid64)
+		}
+		cmd.SysProcAttr = &syscall.SysProcAttr{}
+		cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uid, Gid: gid}
+	}
 	return cmd.Run()
 }
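The uid:gid handling added here can be exercised standalone; a hedged sketch (Linux-only, since syscall.Credential is Linux-specific, and the config.User value is hypothetical):

package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
	"syscall"
)

func main() {
	configUser := "1000:1000" // hypothetical config.User value
	parts := strings.Split(configUser, ":")
	uid64, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		panic(err)
	}
	var gid uint32
	if len(parts) > 1 {
		gid64, err := strconv.ParseUint(parts[1], 10, 32)
		if err != nil {
			panic(err)
		}
		gid = uint32(gid64)
	}
	// The child process runs with the parsed credentials.
	cmd := exec.Command("id")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Credential: &syscall.Credential{Uid: uint32(uid64), Gid: gid},
	}
	out, err := cmd.CombinedOutput()
	fmt.Println(string(out), err)
}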
@@ -0,0 +1,95 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package commands

import (
	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
	"github.com/containers/image/manifest"
	"github.com/docker/docker/builder/dockerfile/instructions"
	"github.com/sirupsen/logrus"
	"os/user"
	"strings"
)

type UserCommand struct {
	cmd *instructions.UserCommand
}

func (r *UserCommand) ExecuteCommand(config *manifest.Schema2Config) error {
	logrus.Info("cmd: USER")
	u := r.cmd.User
	userAndGroup := strings.Split(u, ":")
	userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], config.Env, false)
	if err != nil {
		return err
	}
	var groupStr string
	if len(userAndGroup) > 1 {
		groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], config.Env, false)
		if err != nil {
			return err
		}
	}

	// Lookup by username
	userObj, err := user.Lookup(userStr)
	if err != nil {
		if _, ok := err.(user.UnknownUserError); ok {
			// Lookup by id
			userObj, err = user.LookupId(userStr)
			if err != nil {
				return err
			}
		} else {
			return err
		}
	}

	// Same dance with groups
	var group *user.Group
	if groupStr != "" {
		group, err = user.LookupGroup(groupStr)
		if err != nil {
			if _, ok := err.(user.UnknownGroupError); ok {
				group, err = user.LookupGroupId(groupStr)
				if err != nil {
					return err
				}
			} else {
				return err
			}
		}
	}

	uid := userObj.Uid
	if group != nil {
		uid = uid + ":" + group.Gid
	}

	logrus.Infof("Setting user to %s", uid)
	config.User = uid
	return nil
}

func (r *UserCommand) FilesToSnapshot() []string {
	return []string{}
}

func (r *UserCommand) CreatedBy() string {
	s := []string{r.cmd.Name(), r.cmd.User}
	return strings.Join(s, " ")
}
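A small standalone illustration of the name-then-id lookup fallback used above (the resolveUser helper is hypothetical, written here just to show the mechanism):

package main

import (
	"fmt"
	"os/user"
)

// resolveUser tries the value as a username first; if the name is unknown,
// it retries the same value as a numeric uid, mirroring UserCommand.
func resolveUser(s string) (*user.User, error) {
	u, err := user.Lookup(s)
	if _, ok := err.(user.UnknownUserError); ok {
		return user.LookupId(s)
	}
	return u, err
}

func main() {
	// "0" is not a username, so the id path resolves it to root.
	u, err := resolveUser("0")
	fmt.Println(u, err)
}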
@@ -0,0 +1,98 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands

import (
	"github.com/GoogleCloudPlatform/k8s-container-builder/testutil"
	"github.com/containers/image/manifest"
	"github.com/docker/docker/builder/dockerfile/instructions"
	"testing"
)

var userTests = []struct {
	user        string
	expectedUid string
	shouldError bool
}{
	{
		user:        "root",
		expectedUid: "0",
		shouldError: false,
	},
	{
		user:        "0",
		expectedUid: "0",
		shouldError: false,
	},
	{
		user:        "fakeUser",
		expectedUid: "",
		shouldError: true,
	},
	{
		user:        "root:root",
		expectedUid: "0:0",
		shouldError: false,
	},
	{
		user:        "0:root",
		expectedUid: "0:0",
		shouldError: false,
	},
	{
		user:        "root:0",
		expectedUid: "0:0",
		shouldError: false,
	},
	{
		user:        "0:0",
		expectedUid: "0:0",
		shouldError: false,
	},
	{
		user:        "root:fakeGroup",
		expectedUid: "",
		shouldError: true,
	},
	{
		user:        "$envuser",
		expectedUid: "0",
		shouldError: false,
	},
	{
		user:        "root:$envgroup",
		expectedUid: "0:0",
		shouldError: false,
	},
}

func TestUpdateUser(t *testing.T) {
	for _, test := range userTests {
		cfg := &manifest.Schema2Config{
			Env: []string{
				"envuser=root",
				"envgroup=root",
			},
		}
		cmd := UserCommand{
			&instructions.UserCommand{
				User: test.user,
			},
		}
		err := cmd.ExecuteCommand(cfg)
		testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUid, cfg.User)
	}
}
@@ -0,0 +1,58 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package commands

import (
	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
	"github.com/containers/image/manifest"
	"github.com/docker/docker/builder/dockerfile/instructions"
	"github.com/sirupsen/logrus"
	"os"
	"path/filepath"
)

type WorkdirCommand struct {
	cmd           *instructions.WorkdirCommand
	snapshotFiles []string
}

func (w *WorkdirCommand) ExecuteCommand(config *manifest.Schema2Config) error {
	logrus.Info("cmd: workdir")
	workdirPath := w.cmd.Path
	resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, config.Env, true)
	if err != nil {
		return err
	}
	if filepath.IsAbs(resolvedWorkingDir) {
		config.WorkingDir = resolvedWorkingDir
	} else {
		config.WorkingDir = filepath.Join(config.WorkingDir, resolvedWorkingDir)
	}
	logrus.Infof("Changed working directory to %s", config.WorkingDir)
	w.snapshotFiles = []string{config.WorkingDir}
	return os.MkdirAll(config.WorkingDir, 0755)
}

// FilesToSnapshot returns the workingdir, which should have been created if it didn't already exist
func (w *WorkdirCommand) FilesToSnapshot() []string {
	return w.snapshotFiles
}

// CreatedBy returns some information about the command for the image config history
func (w *WorkdirCommand) CreatedBy() string {
	return w.cmd.Name() + " " + w.cmd.Path
}
@@ -0,0 +1,83 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands

import (
	"github.com/GoogleCloudPlatform/k8s-container-builder/testutil"
	"github.com/containers/image/manifest"
	"github.com/docker/docker/builder/dockerfile/instructions"
	"testing"
)

// Each test here changes the same WorkingDir field in the config,
// so some of the tests build off of each other.
// This is needed to make sure WorkingDir handles paths correctly.
// For example, if WORKDIR specifies a non-absolute path, it should be appended to the current WORKDIR.
var workdirTests = []struct {
	path         string
	expectedPath string
}{
	{
		path:         "/a",
		expectedPath: "/a",
	},
	{
		path:         "b",
		expectedPath: "/a/b",
	},
	{
		path:         "c",
		expectedPath: "/a/b/c",
	},
	{
		path:         "/d",
		expectedPath: "/d",
	},
	{
		path:         "$path",
		expectedPath: "/d/usr",
	},
	{
		path:         "$home",
		expectedPath: "/root",
	},
	{
		path:         "$path/$home",
		expectedPath: "/root/usr/root",
	},
}

func TestWorkdirCommand(t *testing.T) {

	cfg := &manifest.Schema2Config{
		WorkingDir: "/",
		Env: []string{
			"path=usr/",
			"home=/root",
		},
	}

	for _, test := range workdirTests {
		cmd := WorkdirCommand{
			cmd: &instructions.WorkdirCommand{
				Path: test.path,
			},
			snapshotFiles: []string{},
		}
		cmd.ExecuteCommand(cfg)
		testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedPath, cfg.WorkingDir)
	}
}
@@ -26,7 +26,17 @@ const (
 	// WorkspaceDir is the path to the workspace directory
 	WorkspaceDir = "/workspace"

+	// KbuildDir is the path to the kbuild directory
+	KbuildDir = "/kbuild"
+
 	WhitelistPath = "/proc/self/mountinfo"

 	Author = "kbuild"
+
+	// ContextTar is the default name of the tar uploaded to GCS buckets
+	ContextTar = "context.tar.gz"
+
+	// BuildContextDir is the directory a build context will be unpacked into,
+	// for example, a tarball from a GCS bucket will be unpacked here
+	BuildContextDir = "/kbuild/buildcontext/"
 )
@@ -0,0 +1,65 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"cloud.google.com/go/storage"
	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/constants"
	"github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"os"
	"path/filepath"
)

// UnpackTarFromGCSBucket unpacks the context tar in the given bucket to the given directory
func UnpackTarFromGCSBucket(bucketName, directory string) error {
	// Get the tar from the bucket
	tarPath, err := getTarFromBucket(bucketName, directory)
	if err != nil {
		return err
	}
	logrus.Debug("Unpacking source context tar...")
	if err := UnpackCompressedTar(tarPath, directory); err != nil {
		return err
	}
	// Remove the tar so it doesn't interfere with subsequent commands
	logrus.Debugf("Deleting %s", tarPath)
	return os.Remove(tarPath)
}

// getTarFromBucket gets the context tar from the GCS bucket and saves it to the filesystem
// It returns the path to the tar file
func getTarFromBucket(bucketName, directory string) (string, error) {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		return "", err
	}
	bucket := client.Bucket(bucketName)
	// Get the context tar from the GCS bucket, and save it to a tar object
	reader, err := bucket.Object(constants.ContextTar).NewReader(ctx)
	if err != nil {
		return "", err
	}
	defer reader.Close()
	tarPath := filepath.Join(directory, constants.ContextTar)
	if err := CreateFile(tarPath, reader, 0600); err != nil {
		return "", err
	}
	logrus.Debugf("Copied tarball %s from GCS bucket %s to %s", constants.ContextTar, bucketName, tarPath)
	return tarPath, nil
}
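A hedged usage sketch of the new helper; the bucket name is hypothetical, and storage.NewClient authenticates via Application Default Credentials by default:

package main

import (
	"log"

	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/constants"
	"github.com/GoogleCloudPlatform/k8s-container-builder/pkg/util"
)

func main() {
	// Unpack gs://kbuild-test-bucket/context.tar.gz into the build context dir.
	if err := util.UnpackTarFromGCSBucket("kbuild-test-bucket", constants.BuildContextDir); err != nil {
		log.Fatal(err)
	}
}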
@@ -18,13 +18,60 @@ package util

 import (
 	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/dockerfile/shell"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strings"
 )

+// ResolveEnvironmentReplacementList resolves a list of values by calling ResolveEnvironmentReplacement
+func ResolveEnvironmentReplacementList(values, envs []string, isFilepath bool) ([]string, error) {
+	var resolvedValues []string
+	for _, value := range values {
+		if IsSrcRemoteFileURL(value) {
+			resolvedValues = append(resolvedValues, value)
+			continue
+		}
+		resolved, err := ResolveEnvironmentReplacement(value, envs, isFilepath)
+		logrus.Debugf("Resolved %s to %s", value, resolved)
+		if err != nil {
+			return nil, err
+		}
+		resolvedValues = append(resolvedValues, resolved)
+	}
+	return resolvedValues, nil
+}
+
+// ResolveEnvironmentReplacement resolves replacing env variables in some text from envs
+// It takes in a string representation of the command, the value to be resolved, and a list of envs (config.Env)
+// Ex: fp = $foo/newdir, envs = [foo=/foodir], then this should return /foodir/newdir
+// The dockerfile/shell package handles processing env values
+// It handles escape characters and supports expansion from the config.Env array
+// Shlex handles some of the following use cases (these and more are tested in integration tests)
+// ""a'b'c"" -> "a'b'c"
+// "Rex\ The\ Dog \" -> "Rex The Dog"
+// "a\"b" -> "a"b"
+func ResolveEnvironmentReplacement(value string, envs []string, isFilepath bool) (string, error) {
+	shlex := shell.NewLex(parser.DefaultEscapeToken)
+	fp, err := shlex.ProcessWord(value, envs)
+	if !isFilepath {
+		return fp, err
+	}
+	if err != nil {
+		return "", err
+	}
+	fp = filepath.Clean(fp)
+	if IsDestDir(value) {
+		fp = fp + "/"
+	}
+	return fp, nil
+}
+
 // ContainsWildcards returns true if any entry in paths contains wildcards
 func ContainsWildcards(paths []string) bool {
 	for _, path := range paths {
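The shell-lexing behavior that ResolveEnvironmentReplacement wraps can be seen directly; a minimal sketch using the same docker packages (the expected output follows the $foo/newdir example in the comment above):

package main

import (
	"fmt"

	"github.com/docker/docker/builder/dockerfile/parser"
	"github.com/docker/docker/builder/dockerfile/shell"
)

func main() {
	// The same lexer construction ResolveEnvironmentReplacement uses.
	shlex := shell.NewLex(parser.DefaultEscapeToken)
	fp, err := shlex.ProcessWord("$foo/newdir", []string{"foo=/foodir"})
	fmt.Println(fp, err) // /foodir/newdir <nil>
}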
@@ -65,13 +112,17 @@ func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) (map[s
 func matchSources(srcs, files []string) ([]string, error) {
 	var matchedSources []string
 	for _, src := range srcs {
+		if IsSrcRemoteFileURL(src) {
+			matchedSources = append(matchedSources, src)
+			continue
+		}
 		src = filepath.Clean(src)
 		for _, file := range files {
 			matched, err := filepath.Match(src, file)
 			if err != nil {
 				return nil, err
 			}
-			if matched {
+			if matched || src == file {
 				matchedSources = append(matchedSources, file)
 			}
 		}
@@ -119,10 +170,31 @@ func DestinationFilepath(filename, srcName, dest, cwd, buildcontext string) (str
	return filepath.Join(cwd, dest), nil
}

// URLDestinationFilepath gives the destination a file from a remote URL should be saved to
func URLDestinationFilepath(rawurl, dest, cwd string) string {
	if !IsDestDir(dest) {
		if !filepath.IsAbs(dest) {
			return filepath.Join(cwd, dest)
		}
		return dest
	}
	urlBase := filepath.Base(rawurl)
	destPath := filepath.Join(dest, urlBase)

	if !filepath.IsAbs(dest) {
		destPath = filepath.Join(cwd, destPath)
	}
	return destPath
}
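
A hypothetical sketch of the three destination cases this function handles; the URLs and paths are placeholders that mirror the urlDestFilepathTests table later in this diff:

	func exampleURLDest() {
		// Relative directory dest: the file lands under cwd.
		_ = URLDestinationFilepath("https://something/something", ".", "/test") // "/test/something"
		// Absolute non-directory dest: used verbatim as the file path.
		_ = URLDestinationFilepath("https://something/something", "/test", "/cwd") // "/test"
		// Absolute directory dest: the URL's base name is appended.
		_ = URLDestinationFilepath("https://something/something", "/dest/", "/test") // "/dest/something"
	}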

// SourcesToFilesMap returns a map of [src]:[files rooted at source]
func SourcesToFilesMap(srcs []string, root string) (map[string][]string, error) {
	srcMap := make(map[string][]string)
	for _, src := range srcs {
		if IsSrcRemoteFileURL(src) {
			srcMap[src] = []string{src}
			continue
		}
		src = filepath.Clean(src)
		files, err := RelativeFiles(src, root)
		if err != nil {
@@ -161,3 +233,15 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, srcMap map[string][]st
	}
	return nil
}

func IsSrcRemoteFileURL(rawurl string) bool {
	if _, err := url.ParseRequestURI(rawurl); err != nil {
		return false
	}
	resp, err := http.Get(rawurl)
	if err != nil {
		return false
	}
	// Close the response body so the connection is not leaked.
	resp.Body.Close()
	return true
}
@@ -22,6 +22,90 @@ import (
	"testing"
)

var testUrl = "https://github.com/GoogleCloudPlatform/runtimes-common/blob/master/LICENSE"

var testEnvReplacement = []struct {
	path         string
	command      string
	envs         []string
	isFilepath   bool
	expectedPath string
}{
	{
		path:    "/simple/path",
		command: "WORKDIR /simple/path",
		envs: []string{
			"simple=/path/",
		},
		isFilepath:   true,
		expectedPath: "/simple/path",
	},
	{
		path:    "/simple/path/",
		command: "WORKDIR /simple/path/",
		envs: []string{
			"simple=/path/",
		},
		isFilepath:   true,
		expectedPath: "/simple/path/",
	},
	{
		path:    "${a}/b",
		command: "WORKDIR ${a}/b",
		envs: []string{
			"a=/path/",
			"b=/path2/",
		},
		isFilepath:   true,
		expectedPath: "/path/b",
	},
	{
		path:    "/$a/b",
		command: "COPY ${a}/b /c/",
		envs: []string{
			"a=/path/",
			"b=/path2/",
		},
		isFilepath:   true,
		expectedPath: "/path/b",
	},
	{
		path:    "/$a/b/",
		command: "COPY /${a}/b /c/",
		envs: []string{
			"a=/path/",
			"b=/path2/",
		},
		isFilepath:   true,
		expectedPath: "/path/b/",
	},
	{
		path:    "\\$foo",
		command: "COPY \\$foo /quux",
		envs: []string{
			"foo=/path/",
		},
		isFilepath:   true,
		expectedPath: "$foo",
	},
	{
		path:    "8080/$protocol",
		command: "EXPOSE 8080/$protocol",
		envs: []string{
			"protocol=udp",
		},
		expectedPath: "8080/udp",
	},
}

func Test_EnvReplacement(t *testing.T) {
	for _, test := range testEnvReplacement {
		actualPath, err := ResolveEnvironmentReplacement(test.path, test.envs, test.isFilepath)
		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedPath, actualPath)
	}
}

var buildContextPath = "../../integration_tests/"

var destinationFilepathTests = []struct {
@@ -125,6 +209,39 @@ func Test_DestinationFilepath(t *testing.T) {
	}
}

var urlDestFilepathTests = []struct {
	url          string
	cwd          string
	dest         string
	expectedDest string
}{
	{
		url:          "https://something/something",
		cwd:          "/test",
		dest:         ".",
		expectedDest: "/test/something",
	},
	{
		url:          "https://something/something",
		cwd:          "/cwd",
		dest:         "/test",
		expectedDest: "/test",
	},
	{
		url:          "https://something/something",
		cwd:          "/test",
		dest:         "/dest/",
		expectedDest: "/dest/something",
	},
}

func Test_UrlDestFilepath(t *testing.T) {
	for _, test := range urlDestFilepathTests {
		actualDest := URLDestinationFilepath(test.url, test.dest, test.cwd)
		testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedDest, actualDest)
	}
}

var matchSourcesTests = []struct {
	srcs  []string
	files []string
@@ -133,6 +250,7 @@ var matchSourcesTests = []struct {
	{
		srcs: []string{
			"pkg/*",
			testUrl,
		},
		files: []string{
			"pkg/a",

@@ -144,6 +262,7 @@ var matchSourcesTests = []struct {
		expectedFiles: []string{
			"pkg/a",
			"pkg/b",
			testUrl,
		},
	},
}
@@ -298,6 +417,7 @@ var testResolveSources = []struct {
		srcsAndDest: []string{
			"context/foo",
			"context/b*",
			testUrl,
			"dest/",
		},
		expectedMap: map[string][]string{

@@ -311,6 +431,9 @@ var testResolveSources = []struct {
				"context/bar/bat",
				"context/bar/baz",
			},
			testUrl: {
				testUrl,
			},
		},
	},
}
@@ -321,3 +444,29 @@ func Test_ResolveSources(t *testing.T) {
		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedMap, actualMap)
	}
}

var testRemoteUrls = []struct {
	url   string
	valid bool
}{
	{
		url:   testUrl,
		valid: true,
	},
	{
		url:   "not/real/",
		valid: false,
	},
	{
		url:   "https://url.com/something/not/real",
		valid: false,
	},
}

func Test_RemoteUrls(t *testing.T) {
	for _, test := range testRemoteUrls {
		valid := IsSrcRemoteFileURL(test.url)
		testutil.CheckErrorAndDeepEqual(t, false, nil, test.valid, valid)
	}
}
@@ -23,9 +23,11 @@ import (
	"github.com/containers/image/docker"
	"github.com/sirupsen/logrus"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"
)

var whitelist = []string{"/kbuild"}
@@ -117,6 +119,17 @@ func RelativeFiles(fp string, root string) ([]string, error) {
	return files, err
}

// Files returns a list of all files rooted at root
func Files(root string) ([]string, error) {
	var files []string
	logrus.Debugf("Getting files and contents at root %s", root)
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		files = append(files, path)
		return err
	})
	return files, err
}

// FilepathExists returns true if the path exists
func FilepathExists(path string) bool {
	_, err := os.Stat(path)
@@ -142,3 +155,27 @@ func CreateFile(path string, reader io.Reader, perm os.FileMode) error {
	}
	return dest.Chmod(perm)
}

// DownloadFileToDest downloads the file at rawurl to the given dest for the ADD command
// From add command docs:
// 1. If <src> is a remote file URL:
//    - destination will have permissions of 0600
//    - If remote file has HTTP Last-Modified header, we set the mtime of the file to that timestamp
func DownloadFileToDest(rawurl, dest string) error {
	resp, err := http.Get(rawurl)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if err := CreateFile(dest, resp.Body, 0600); err != nil {
		return err
	}
	mTime := time.Time{}
	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}
	return os.Chtimes(dest, mTime, mTime)
}
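
A hypothetical usage sketch; the URL and destination are placeholders, not values from this commit:

	func exampleDownload() error {
		// After this call the file exists with mode 0600, and its mtime matches
		// the server's Last-Modified header when one was sent.
		return DownloadFileToDest("https://example.com/archive.tar.gz", "/workspace/archive.tar.gz")
	}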
@@ -18,8 +18,14 @@ package util

import (
	"archive/tar"
	"compress/bzip2"
	"compress/gzip"
	pkgutil "github.com/GoogleCloudPlatform/container-diff/pkg/util"
	"github.com/docker/docker/pkg/archive"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"io"
	"io/ioutil"
	"os"
	"syscall"
)
@@ -86,3 +92,93 @@ func checkHardlink(p string, i os.FileInfo) (bool, string) {
	}
	return hardlink, linkDst
}

// UnpackLocalTarArchive unpacks the tar archive at path to the directory dest.
// It returns an error if the path does not point at a local tar archive.
func UnpackLocalTarArchive(path, dest string) error {
	// First, we need to check if the path is a local tar archive
	if compressed, compressionLevel := fileIsCompressedTar(path); compressed {
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		if compressionLevel == archive.Gzip {
			return UnpackCompressedTar(path, dest)
		} else if compressionLevel == archive.Bzip2 {
			bzr := bzip2.NewReader(file)
			return pkgutil.UnTar(bzr, dest, nil)
		}
	}
	if fileIsUncompressedTar(path) {
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		return pkgutil.UnTar(file, dest, nil)
	}
	return errors.New("path does not lead to local tar archive")
}

// IsFileLocalTarArchive returns true if the file is a local tar archive
func IsFileLocalTarArchive(src string) bool {
	compressed, _ := fileIsCompressedTar(src)
	uncompressed := fileIsUncompressedTar(src)
	return compressed || uncompressed
}
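
A minimal sketch of how these two helpers compose; the probe-then-unpack guard is an assumption about caller behavior, not code from this commit:

	func exampleUnpack(path, dest string) error {
		// UnpackLocalTarArchive errors on non-archives, so callers can probe first.
		if IsFileLocalTarArchive(path) {
			return UnpackLocalTarArchive(path, dest)
		}
		return nil // not an archive; nothing to do
	}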

func fileIsCompressedTar(src string) (bool, archive.Compression) {
	r, err := os.Open(src)
	if err != nil {
		return false, -1
	}
	defer r.Close()
	buf, err := ioutil.ReadAll(r)
	if err != nil {
		return false, -1
	}
	compressionLevel := archive.DetectCompression(buf)
	return (compressionLevel > 0), compressionLevel
}

func fileIsUncompressedTar(src string) bool {
	r, err := os.Open(src)
	if err != nil {
		return false
	}
	// Defer the Close only after the error check; deferring first would
	// dereference a nil *os.File when the open fails.
	defer r.Close()
	fi, err := os.Stat(src)
	if err != nil {
		return false
	}
	if fi.Size() == 0 {
		return false
	}
	tr := tar.NewReader(r)
	if tr == nil {
		return false
	}
	// A single successful Next call is enough to confirm a tar header.
	if _, err := tr.Next(); err != nil {
		return false
	}
	return true
}

// UnpackCompressedTar unpacks the compressed tar at path to dir
func UnpackCompressedTar(path, dir string) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()
	gzr, err := gzip.NewReader(file)
	if err != nil {
		return err
	}
	defer gzr.Close()
	return pkgutil.UnTar(gzr, dir, nil)
}
@@ -0,0 +1,108 @@
/*
Copyright 2018 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"archive/tar"
	"compress/gzip"
	"github.com/GoogleCloudPlatform/k8s-container-builder/testutil"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"
)

var regularFiles = []string{"file", "file.tar", "file.tar.gz"}
var uncompressedTars = []string{"uncompressed", "uncompressed.tar"}
var compressedTars = []string{"compressed", "compressed.tar.gz"}

func Test_IsLocalTarArchive(t *testing.T) {
	testDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("err setting up temp dir: %v", err)
	}
	defer os.RemoveAll(testDir)
	if err := setUpFilesAndTars(testDir); err != nil {
		t.Fatal(err)
	}
	// Test we get the correct result for regular files
	for _, regularFile := range regularFiles {
		isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, regularFile))
		testutil.CheckErrorAndDeepEqual(t, false, nil, false, isTarArchive)
	}
	// Test we get the correct result for uncompressed tars
	for _, uncompressedTar := range uncompressedTars {
		isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, uncompressedTar))
		testutil.CheckErrorAndDeepEqual(t, false, nil, true, isTarArchive)
	}
	// Test we get the correct result for compressed tars
	for _, compressedTar := range compressedTars {
		isTarArchive := IsFileLocalTarArchive(filepath.Join(testDir, compressedTar))
		testutil.CheckErrorAndDeepEqual(t, false, nil, true, isTarArchive)
	}
}

func setUpFilesAndTars(testDir string) error {
	regularFilesAndContents := map[string]string{
		regularFiles[0]: "",
		regularFiles[1]: "something",
		regularFiles[2]: "here",
	}
	if err := testutil.SetupFiles(testDir, regularFilesAndContents); err != nil {
		return err
	}

	for _, uncompressedTar := range uncompressedTars {
		tarFile, err := os.Create(filepath.Join(testDir, uncompressedTar))
		if err != nil {
			return err
		}
		if err := createTar(testDir, tarFile); err != nil {
			return err
		}
	}

	for _, compressedTar := range compressedTars {
		tarFile, err := os.Create(filepath.Join(testDir, compressedTar))
		if err != nil {
			return err
		}
		gzr := gzip.NewWriter(tarFile)
		if err := createTar(testDir, gzr); err != nil {
			return err
		}
		// Close the gzip writer so its buffered data and footer reach disk.
		if err := gzr.Close(); err != nil {
			return err
		}
	}
	return nil
}

func createTar(testdir string, writer io.Writer) error {
	w := tar.NewWriter(writer)
	defer w.Close()
	for _, regFile := range regularFiles {
		filePath := filepath.Join(testdir, regFile)
		fi, err := os.Stat(filePath)
		if err != nil {
			return err
		}
		if err := AddToTar(filePath, fi, w); err != nil {
			return err
		}
	}
	return nil
}
@@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e

if [ $# -ne 2 ]; then
	echo "Usage: run_in_docker.sh <context directory> <image tag>"
	exit 1
fi

context=$1
tag=$2

if [[ ! -e $HOME/.config/gcloud/application_default_credentials.json ]]; then
	echo "Application Default Credentials do not exist. Run [gcloud auth application-default login] to configure them"
	exit 1
fi

docker run \
	-v $HOME/.config/gcloud:/root/.config/gcloud \
	-v ${context}:/workspace \
	gcr.io/kbuild-project/executor:latest \
	/kbuild/executor -d ${tag}
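
For reference, a hypothetical invocation of the script above; the context directory and image tag are placeholders:

	./run_in_docker.sh ./my-context gcr.io/my-project/my-image:latest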
@@ -0,0 +1,15 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>
@@ -0,0 +1,39 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>

# Keep the list alphabetically sorted.

Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>
@@ -0,0 +1,202 @@
[vendored LICENSE file: the full, unmodified standard text of the Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0), added verbatim with the vendored dependency]
@@ -0,0 +1,437 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

const (
	// metadataIP is the documented metadata server IP address.
	metadataIP = "169.254.169.254"

	// metadataHostEnv is the environment variable specifying the
	// GCE metadata hostname. If empty, the default value of
	// metadataIP ("169.254.169.254") is used instead.
	// This variable name is not defined by any spec, as far as
	// I know; it was made up for the Go package.
	metadataHostEnv = "GCE_METADATA_HOST"

	userAgent = "gcloud-golang/0.1"
)

type cachedValue struct {
	k    string
	trim bool
	mu   sync.Mutex
	v    string
}

var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)

var (
	metaClient = &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			ResponseHeaderTimeout: 2 * time.Second,
		},
	}
	subscribeClient = &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
		},
	}
)

// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
	val, _, err := getETag(metaClient, suffix)
	return val, err
}

// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

func getTrimmed(suffix string) (s string, err error) {
	s, err = Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *cachedValue) get() (v string, err error) {
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = getTrimmed(c.k)
	} else {
		v, err = Get(c.k)
	}
	if err == nil {
		c.v = v
	}
	return
}

var (
	onGCEOnce sync.Once
	onGCE     bool
)

// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}

func initOnGCE() {
	onGCE = testOnGCE()
}

func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
	go func() {
		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
		req.Header.Set("User-Agent", userAgent)
		res, err := ctxhttp.Do(ctx, metaClient, req)
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}

// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	name := strings.TrimSpace(string(slurp))
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := getETag(subscribeClient, suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
	return getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
	var s []string
	j, err := Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
	return instID.get()
}

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
	host, err := Hostname()
	if err != nil {
		return "", err
	}
	return strings.Split(host, ".")[0], nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
	zone, err := getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }

func lines(suffix string) ([]string, error) {
	j, err := Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
	return Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
	return Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}

func strsContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}
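
A minimal, hypothetical sketch of how a consumer might use this vendored package; the lookups only succeed when actually running on GCE:

	package main

	import (
		"fmt"

		"cloud.google.com/go/compute/metadata"
	)

	func main() {
		if !metadata.OnGCE() {
			fmt.Println("not on GCE; metadata server unavailable")
			return
		}
		proj, err := metadata.ProjectID() // cached after the first successful fetch
		if err != nil {
			fmt.Println("lookup failed:", err)
			return
		}
		fmt.Println("project:", proj)
	}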
@@ -0,0 +1,284 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package iam supports the resource-specific operations of Google Cloud
// IAM (Identity and Access Management) for the Google Cloud Libraries.
// See https://cloud.google.com/iam for more about IAM.
//
// Users of the Google Cloud Libraries will typically not use this package
// directly. Instead they will begin with some resource that supports IAM, like
// a pubsub topic, and call its IAM method to get a Handle for that resource.
package iam

import (
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	pb "google.golang.org/genproto/googleapis/iam/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// client abstracts the IAMPolicy API to allow multiple implementations.
type client interface {
	Get(ctx context.Context, resource string) (*pb.Policy, error)
	Set(ctx context.Context, resource string, p *pb.Policy) error
	Test(ctx context.Context, resource string, perms []string) ([]string, error)
}

// grpcClient implements client for the standard gRPC-based IAMPolicy service.
type grpcClient struct {
	c pb.IAMPolicyClient
}

var withRetry = gax.WithRetry(func() gax.Retryer {
	return gax.OnCodes([]codes.Code{
		codes.DeadlineExceeded,
		codes.Unavailable,
	}, gax.Backoff{
		Initial:    100 * time.Millisecond,
		Max:        60 * time.Second,
		Multiplier: 1.3,
	})
})

func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
	var proto *pb.Policy
	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return proto, nil
}

func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
			Resource: resource,
			Policy:   p,
		})
		return err
	}, withRetry)
}

func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
	var res *pb.TestIamPermissionsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		var err error
		res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
			Resource:    resource,
			Permissions: perms,
		})
		return err
	}, withRetry)
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}

// A Handle provides IAM operations for a resource.
type Handle struct {
	c        client
	resource string
}

// InternalNewHandle is for use by the Google Cloud Libraries only.
//
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
	return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
}

// InternalNewHandleClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// client implementation.
func InternalNewHandleClient(c client, resource string) *Handle {
	return &Handle{
		c:        c,
		resource: resource,
	}
}

// Policy retrieves the IAM policy for the resource.
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
	proto, err := h.c.Get(ctx, h.resource)
	if err != nil {
		return nil, err
	}
	return &Policy{InternalProto: proto}, nil
}

// SetPolicy replaces the resource's current policy with the supplied Policy.
//
// If policy was created from a prior call to Get, then the modification will
// only succeed if the policy has not changed since the Get.
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
	return h.c.Set(ctx, h.resource, policy.InternalProto)
}

// TestPermissions returns the subset of permissions that the caller has on the resource.
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
	return h.c.Test(ctx, h.resource, permissions)
}

// A RoleName is a name representing a collection of permissions.
type RoleName string

// Common role names.
const (
	Owner  RoleName = "roles/owner"
	Editor RoleName = "roles/editor"
	Viewer RoleName = "roles/viewer"
)

const (
	// AllUsers is a special member that denotes all users, even unauthenticated ones.
	AllUsers = "allUsers"

	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
	AllAuthenticatedUsers = "allAuthenticatedUsers"
)

// A Policy is a list of Bindings representing roles
// granted to members.
//
// The zero Policy is a valid policy with no bindings.
type Policy struct {
	// TODO(jba): when type aliases are available, put Policy into an internal package
	// and provide an exported alias here.

	// This field is exported for use by the Google Cloud Libraries only.
	// It may become unexported in a future release.
	InternalProto *pb.Policy
}

// Members returns the list of members with the supplied role.
// The return value should not be modified. Use Add and Remove
// to modify the members of a role.
func (p *Policy) Members(r RoleName) []string {
	b := p.binding(r)
	if b == nil {
		return nil
	}
	return b.Members
}

// HasRole reports whether member has role r.
func (p *Policy) HasRole(member string, r RoleName) bool {
	return memberIndex(member, p.binding(r)) >= 0
}

// Add adds member member to role r if it is not already present.
// A new binding is created if there is no binding for the role.
func (p *Policy) Add(member string, r RoleName) {
	b := p.binding(r)
	if b == nil {
		if p.InternalProto == nil {
			p.InternalProto = &pb.Policy{}
		}
		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
			Role:    string(r),
			Members: []string{member},
		})
		return
	}
	if memberIndex(member, b) < 0 {
		b.Members = append(b.Members, member)
		return
	}
}

// Remove removes member from role r if it is present.
func (p *Policy) Remove(member string, r RoleName) {
	bi := p.bindingIndex(r)
	if bi < 0 {
		return
	}
	bindings := p.InternalProto.Bindings
	b := bindings[bi]
	mi := memberIndex(member, b)
	if mi < 0 {
		return
	}
	// Order doesn't matter for bindings or members, so to remove, move the last item
	// into the removed spot and shrink the slice.
	if len(b.Members) == 1 {
		// Remove binding.
		last := len(bindings) - 1
		bindings[bi] = bindings[last]
		bindings[last] = nil
		p.InternalProto.Bindings = bindings[:last]
		return
	}
	// Remove member.
	// TODO(jba): worry about multiple copies of m?
	last := len(b.Members) - 1
	b.Members[mi] = b.Members[last]
	b.Members[last] = ""
	b.Members = b.Members[:last]
}

// Roles returns the names of all the roles that appear in the Policy.
func (p *Policy) Roles() []RoleName {
	if p.InternalProto == nil {
		return nil
	}
	var rns []RoleName
	for _, b := range p.InternalProto.Bindings {
		rns = append(rns, RoleName(b.Role))
	}
	return rns
}

// binding returns the Binding for the supplied role, or nil if there isn't one.
func (p *Policy) binding(r RoleName) *pb.Binding {
	i := p.bindingIndex(r)
	if i < 0 {
		return nil
	}
	return p.InternalProto.Bindings[i]
}

func (p *Policy) bindingIndex(r RoleName) int {
	if p.InternalProto == nil {
		return -1
	}
	for i, b := range p.InternalProto.Bindings {
		if b.Role == string(r) {
			return i
		}
	}
	return -1
}

// memberIndex returns the index of m in b's Members, or -1 if not found.
func memberIndex(m string, b *pb.Binding) int {
	if b == nil {
		return -1
	}
	for i, mm := range b.Members {
		if mm == m {
			return i
		}
	}
	return -1
}
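
A small, hypothetical sketch of the in-memory Policy type above; the member string is a placeholder and no RPCs are involved:

	package main

	import (
		"fmt"

		"cloud.google.com/go/iam"
	)

	func main() {
		var p iam.Policy // the zero Policy is valid and has no bindings
		p.Add("user:alice@example.com", iam.Editor)
		fmt.Println(p.HasRole("user:alice@example.com", iam.Editor)) // true
		p.Remove("user:alice@example.com", iam.Editor)
		fmt.Println(len(p.Roles())) // 0
	}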
@@ -0,0 +1,54 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"fmt"

	"google.golang.org/api/googleapi"
	"google.golang.org/grpc/status"
)

// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
//   fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
	if err == nil {
		panic("Annotate called with nil")
	}
	if s, ok := status.FromError(err); ok {
		p := s.Proto()
		p.Message = msg + ": " + p.Message
		return status.ErrorProto(p)
	}
	if g, ok := err.(*googleapi.Error); ok {
		g.Message = msg + ": " + g.Message
		return g
	}
	return fmt.Errorf("%s: %v", msg, err)
}

// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
	return Annotate(err, fmt.Sprintf(format, args...))
}
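// Illustrative sketch (not part of this commit): Annotate prefixes context
// onto an error while preserving its gRPC status code. Uses the standard
// status and codes packages from google.golang.org/grpc.
//
//	err := status.Error(codes.NotFound, "bucket missing")
//	err = internal.Annotate(err, "storage")
//	// status.Code(err) is still codes.NotFound;
//	// err.Error() now mentions "storage: bucket missing".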
@@ -0,0 +1,108 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package optional provides versions of primitive types that can
// be nil. These are useful in methods that update some of an API object's
// fields.
package optional

import (
	"fmt"
	"strings"
	"time"
)

type (
	// Bool is either a bool or nil.
	Bool interface{}

	// String is either a string or nil.
	String interface{}

	// Int is either an int or nil.
	Int interface{}

	// Uint is either a uint or nil.
	Uint interface{}

	// Float64 is either a float64 or nil.
	Float64 interface{}

	// Duration is either a time.Duration or nil.
	Duration interface{}
)

// ToBool returns its argument as a bool.
// It panics if its argument is nil or not a bool.
func ToBool(v Bool) bool {
	x, ok := v.(bool)
	if !ok {
		doPanic("Bool", v)
	}
	return x
}

// ToString returns its argument as a string.
// It panics if its argument is nil or not a string.
func ToString(v String) string {
	x, ok := v.(string)
	if !ok {
		doPanic("String", v)
	}
	return x
}

// ToInt returns its argument as an int.
// It panics if its argument is nil or not an int.
func ToInt(v Int) int {
	x, ok := v.(int)
	if !ok {
		doPanic("Int", v)
	}
	return x
}

// ToUint returns its argument as a uint.
// It panics if its argument is nil or not a uint.
func ToUint(v Uint) uint {
	x, ok := v.(uint)
	if !ok {
		doPanic("Uint", v)
	}
	return x
}

// ToFloat64 returns its argument as a float64.
// It panics if its argument is nil or not a float64.
func ToFloat64(v Float64) float64 {
	x, ok := v.(float64)
	if !ok {
		doPanic("Float64", v)
	}
	return x
}

// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
	x, ok := v.(time.Duration)
	if !ok {
		doPanic("Duration", v)
	}
	return x
}

func doPanic(capType string, v interface{}) {
	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}
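// Illustrative sketch (not part of this commit): the optional types let an
// update struct distinguish "not set" (nil) from an explicit zero value.
//
//	var enabled optional.Bool          // nil: leave the field unchanged
//	enabled = false                    // set: explicitly disable
//	if enabled != nil {
//		fmt.Println(optional.ToBool(enabled)) // prints false
//	}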
@@ -0,0 +1,55 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"time"

	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
)

// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Err() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
	return retry(ctx, bo, f, gax.Sleep)
}

func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
	sleep func(context.Context, time.Duration) error) error {
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		// Remember the last "real" error from f.
		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
			lastErr = err
		}
		p := bo.Pause()
		if cerr := sleep(ctx, p); cerr != nil {
			if lastErr != nil {
				return Annotatef(lastErr, "retry failed with %v; last error", cerr)
			}
			return cerr
		}
	}
}
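// Illustrative sketch (not part of this commit): driving Retry with a
// gax.Backoff. doRPC and isRetryable are hypothetical placeholders for the
// caller's operation and its retry classification.
//
//	bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: 5 * time.Second, Multiplier: 2}
//	err := internal.Retry(ctx, bo, func() (bool, error) {
//		err := doRPC() // hypothetical call
//		if isRetryable(err) {
//			return false, err // keep trying with backoff
//		}
//		return true, err // stop: success or permanent failure
//	})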
@@ -0,0 +1,83 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.8

package trace

import (
	"go.opencensus.io/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	"google.golang.org/genproto/googleapis/rpc/code"
	"google.golang.org/grpc/status"
)

func StartSpan(ctx context.Context, name string) context.Context {
	ctx, _ = trace.StartSpan(ctx, name)
	return ctx
}

func EndSpan(ctx context.Context, err error) {
	span := trace.FromContext(ctx)
	if err != nil {
		span.SetStatus(toStatus(err))
	}
	span.End()
}

// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
	if err2, ok := err.(*googleapi.Error); ok {
		return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
	} else if s, ok := status.FromError(err); ok {
		return trace.Status{Code: int32(s.Code()), Message: s.Message()}
	} else {
		return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
	}
}

// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
	switch httpStatusCode {
	case 200:
		return int32(code.Code_OK)
	case 499:
		return int32(code.Code_CANCELLED)
	case 500:
		return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
	case 400:
		return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
	case 504:
		return int32(code.Code_DEADLINE_EXCEEDED)
	case 404:
		return int32(code.Code_NOT_FOUND)
	case 409:
		return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
	case 403:
		return int32(code.Code_PERMISSION_DENIED)
	case 401:
		return int32(code.Code_UNAUTHENTICATED)
	case 429:
		return int32(code.Code_RESOURCE_EXHAUSTED)
	case 501:
		return int32(code.Code_UNIMPLEMENTED)
	case 503:
		return int32(code.Code_UNAVAILABLE)
	default:
		return int32(code.Code_UNKNOWN)
	}
}
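// Illustrative sketch (not part of this commit): the StartSpan/EndSpan pair as
// used throughout this package — start a span, record the operation's error via
// the named return, and close the span in one deferred call. The method itself
// is a hypothetical placeholder.
//
//	func (x *thing) Op(ctx context.Context) (err error) {
//		ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.thing.Op")
//		defer func() { trace.EndSpan(ctx, err) }()
//		// ... do work that may set err ...
//		return err
//	}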
@@ -0,0 +1,30 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.8

package trace

import (
	"golang.org/x/net/context"
)

// OpenCensus only supports go 1.8 and higher.

func StartSpan(ctx context.Context, _ string) context.Context {
	return ctx
}

func EndSpan(context.Context, error) {
}
@@ -0,0 +1,71 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:generate ./update_version.sh

// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version

import (
	"runtime"
	"strings"
	"unicode"
)

// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20180226"

// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
	return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return ""
}

func notSemverRune(r rune) bool {
	return strings.IndexRune("0123456789.", r) < 0
}
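// Illustrative sketch (not part of this commit): how goVer above normalizes
// runtime version strings into semver-like form, traced by hand from the code.
//
//	goVer("go1.10")            // "1.10.0"  (pads to three segments)
//	goVer("go1.9.2")           // "1.9.2"   (already three segments)
//	goVer("go1.10rc1")         // "1.10.0-rc1" (suffix becomes a prerelease tag)
//	goVer("devel +a1b2c3 Mon") // "a1b2c3"  (devel builds keep only the hash)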
@@ -0,0 +1,245 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"net/http"
	"reflect"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)

// ACLRole is the level of access to grant.
type ACLRole string

const (
	RoleOwner  ACLRole = "OWNER"
	RoleReader ACLRole = "READER"
	RoleWriter ACLRole = "WRITER"
)

// ACLEntity refers to a user or group.
// They are sometimes referred to as grantees.
//
// It could be in the form of:
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
// "domain-<domain>" and "project-team-<projectId>".
//
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
type ACLEntity string

const (
	AllUsers              ACLEntity = "allUsers"
	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)

// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
type ACLRule struct {
	Entity ACLEntity
	Role   ACLRole
}

// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
type ACLHandle struct {
	c           *Client
	bucket      string
	object      string
	isDefault   bool
	userProject string // for requester-pays buckets
}

// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
	defer func() { trace.EndSpan(ctx, err) }()

	if a.object != "" {
		return a.objectDelete(ctx, entity)
	}
	if a.isDefault {
		return a.bucketDefaultDelete(ctx, entity)
	}
	return a.bucketDelete(ctx, entity)
}

// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
	defer func() { trace.EndSpan(ctx, err) }()

	if a.object != "" {
		return a.objectSet(ctx, entity, role, false)
	}
	if a.isDefault {
		return a.objectSet(ctx, entity, role, true)
	}
	return a.bucketSet(ctx, entity, role)
}

// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) (_ []ACLRule, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
	defer func() { trace.EndSpan(ctx, err) }()

	if a.object != "" {
		return a.objectList(ctx)
	}
	if a.isDefault {
		return a.bucketDefaultList(ctx)
	}
	return a.bucketList(ctx)
}

func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toACLRules(acls.Items), nil
}

func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
}

func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.BucketAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.List(a.bucket)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	r := make([]ACLRule, len(acls.Items))
	for i, v := range acls.Items {
		r[i].Entity = ACLEntity(v.Entity)
		r[i].Role = ACLRole(v.Role)
	}
	return r, nil
}

func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
	acl := &raw.BucketAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
		a.configureCall(req, ctx)
		_, err := req.Do()
		return err
	})
	if err != nil {
		return err
	}
	return nil
}

func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
	err := runWithRetry(ctx, func() error {
		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
	if err != nil {
		return err
	}
	return nil
}

func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
	var acls *raw.ObjectAccessControls
	var err error
	err = runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
		a.configureCall(req, ctx)
		acls, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return toACLRules(acls.Items), nil
}

func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
	type setRequest interface {
		Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
		Header() http.Header
	}

	acl := &raw.ObjectAccessControl{
		Bucket: a.bucket,
		Entity: string(entity),
		Role:   string(role),
	}
	var req setRequest
	if isBucketDefault {
		req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
	} else {
		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
	}
	a.configureCall(req, ctx)
	return runWithRetry(ctx, func() error {
		_, err := req.Do()
		return err
	})
}

func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
	return runWithRetry(ctx, func() error {
		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
		a.configureCall(req, ctx)
		return req.Do()
	})
}

func (a *ACLHandle) configureCall(call interface {
	Header() http.Header
}, ctx context.Context) {
	vc := reflect.ValueOf(call)
	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
	if a.userProject != "" {
		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
	}
	setClientHeader(call.Header())
}

func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
	r := make([]ACLRule, 0, len(items))
	for _, item := range items {
		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
	}
	return r
}
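// Illustrative sketch (not part of this commit): granting and listing ACLs
// through an ACLHandle. The client, bucket, and object names are placeholders;
// AllUsers and RoleReader are the constants defined above.
//
//	acl := client.Bucket("my-bucket").Object("my-object").ACL()
//	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
//		// handle error
//	}
//	rules, err := acl.List(ctx)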
@@ -0,0 +1,944 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"fmt"
	"net/http"
	"reflect"
	"time"

	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	raw "google.golang.org/api/storage/v1"
)

// BucketHandle provides operations on a Google Cloud Storage bucket.
// Use Client.Bucket to get a handle.
type BucketHandle struct {
	c                *Client
	name             string
	acl              ACLHandle
	defaultObjectACL ACLHandle
	conds            *BucketConditions
	userProject      string // project for Requester Pays buckets
}

// Bucket returns a BucketHandle, which provides operations on the named bucket.
// This call does not perform any network operations.
//
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
//   https://cloud.google.com/storage/docs/bucket-naming
func (c *Client) Bucket(name string) *BucketHandle {
	return &BucketHandle{
		c:    c,
		name: name,
		acl: ACLHandle{
			c:      c,
			bucket: name,
		},
		defaultObjectACL: ACLHandle{
			c:         c,
			bucket:    name,
			isDefault: true,
		},
	}
}

// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
	defer func() { trace.EndSpan(ctx, err) }()

	var bkt *raw.Bucket
	if attrs != nil {
		bkt = attrs.toRawBucket()
	} else {
		bkt = &raw.Bucket{}
	}
	bkt.Name = b.name
	// If there is lifecycle information but no location, explicitly set
	// the location. This is a GCS quirk/bug.
	if bkt.Location == "" && bkt.Lifecycle != nil {
		bkt.Location = "US"
	}
	req := b.c.raw.Buckets.Insert(projectID, bkt)
	setClientHeader(req.Header())
	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}

// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
	defer func() { trace.EndSpan(ctx, err) }()

	req, err := b.newDeleteCall()
	if err != nil {
		return err
	}
	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
}

func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
	req := b.c.raw.Buckets.Delete(b.name)
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}

// ACL returns an ACLHandle, which provides access to the bucket's access control list.
// This controls who can list, create or overwrite the objects in a bucket.
// This call does not perform any network operations.
func (b *BucketHandle) ACL() *ACLHandle {
	return &b.acl
}

// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
// This call does not perform any network operations.
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
	return &b.defaultObjectACL
}

// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations.
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
//   https://cloud.google.com/storage/docs/bucket-naming
func (b *BucketHandle) Object(name string) *ObjectHandle {
	return &ObjectHandle{
		c:      b.c,
		bucket: b.name,
		object: name,
		acl: ACLHandle{
			c:           b.c,
			bucket:      b.name,
			object:      name,
			userProject: b.userProject,
		},
		gen:         -1,
		userProject: b.userProject,
	}
}

// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (_ *BucketAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
	defer func() { trace.EndSpan(ctx, err) }()

	req, err := b.newGetCall()
	if err != nil {
		return nil, err
	}
	var resp *raw.Bucket
	err = runWithRetry(ctx, func() error {
		resp, err = req.Context(ctx).Do()
		return err
	})
	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
		return nil, ErrBucketNotExist
	}
	if err != nil {
		return nil, err
	}
	return newBucket(resp)
}

func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
	req := b.c.raw.Buckets.Get(b.name).Projection("full")
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}

// Update updates a bucket's attributes.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (_ *BucketAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
	defer func() { trace.EndSpan(ctx, err) }()

	req, err := b.newPatchCall(&uattrs)
	if err != nil {
		return nil, err
	}
	// TODO(jba): retry iff metagen is set?
	rb, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return newBucket(rb)
}

func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
	rb := uattrs.toRawBucket()
	req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
	setClientHeader(req.Header())
	if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
		return nil, err
	}
	if b.userProject != "" {
		req.UserProject(b.userProject)
	}
	return req, nil
}

// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
// Read-only fields are ignored by BucketHandle.Create.
type BucketAttrs struct {
	// Name is the name of the bucket.
	// This field is read-only.
	Name string

	// ACL is the list of access control rules on the bucket.
	ACL []ACLRule

	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule

	// Location is the location of the bucket. It defaults to "US".
	Location string

	// MetaGeneration is the metadata generation of the bucket.
	// This field is read-only.
	MetaGeneration int64

	// StorageClass is the default storage class of the bucket. This defines
	// how objects in the bucket are stored and determines the SLA
	// and the cost of storage. Typical values are "MULTI_REGIONAL",
	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
	// the bucket's location settings.
	StorageClass string

	// Created is the creation time of the bucket.
	// This field is read-only.
	Created time.Time

	// VersioningEnabled reports whether this bucket has versioning enabled.
	VersioningEnabled bool

	// Labels are the bucket's labels.
	Labels map[string]string

	// RequesterPays reports whether the bucket is a Requester Pays bucket.
	// Clients performing operations on Requester Pays buckets must provide
	// a user project (see BucketHandle.UserProject), which will be billed
	// for the operations.
	RequesterPays bool

	// Lifecycle is the lifecycle configuration for objects in the bucket.
	Lifecycle Lifecycle

	// RetentionPolicy enforces a minimum retention time for all objects
	// contained in the bucket. A RetentionPolicy of nil implies the bucket
	// has no minimum data retention.
	//
	// This feature is in private alpha release. It is not currently available to
	// most customers. It might be changed in backwards-incompatible ways and is not
	// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy

	// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
	CORS []CORS
}

// Lifecycle is the lifecycle configuration for objects in the bucket.
type Lifecycle struct {
	Rules []LifecycleRule
}

// RetentionPolicy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
type RetentionPolicy struct {
	// RetentionPeriod specifies the duration that objects need to be
	// retained. Retention duration must be greater than zero and less than
	// 100 years. Note that enforcement of retention periods less than a day
	// is not guaranteed. Such periods should only be used for testing
	// purposes.
	RetentionPeriod time.Duration

	// EffectiveTime is the time from which the policy was enforced and
	// effective. This field is read-only.
	EffectiveTime time.Time
}

const (
	// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
	rfc3339Date = "2006-01-02"

	// DeleteAction is a lifecycle action that deletes live and/or archived
	// objects. Takes precedence over SetStorageClass actions.
	DeleteAction = "Delete"

	// SetStorageClassAction changes the storage class of live and/or archived
	// objects.
	SetStorageClassAction = "SetStorageClass"
)

// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
type LifecycleRule struct {
	// Action is the action to take when all of the associated conditions are
	// met.
	Action LifecycleAction

	// Condition is the set of conditions that must be met for the associated
	// action to be taken.
	Condition LifecycleCondition
}

// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
	// Type is the type of action to take on matching objects.
	//
	// Acceptable values are "Delete" to delete matching objects and
	// "SetStorageClass" to set the storage class defined in StorageClass on
	// matching objects.
	Type string

	// StorageClass is the storage class to set on matching objects if the Action
	// is "SetStorageClass".
	StorageClass string
}

// Liveness specifies whether the object is live or not.
type Liveness int

const (
	// LiveAndArchived includes both live and archived objects.
	LiveAndArchived Liveness = iota
	// Live specifies that the object is still live.
	Live
	// Archived specifies that the object is archived.
	Archived
)

// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AgeInDays is the age of the object in days.
	AgeInDays int64

	// CreatedBefore is the time the object was created.
	//
	// This condition is satisfied when an object is created before midnight of
	// the specified date in UTC.
	CreatedBefore time.Time

	// Liveness specifies the object's liveness. Relevant only for versioned objects.
	Liveness Liveness

	// MatchesStorageClasses is the condition matching the object's storage
	// class.
	//
	// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
	// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
	MatchesStorageClasses []string

	// NumNewerVersions is the condition matching objects with a number of newer versions.
	//
	// If the value is N, this condition is satisfied when there are at least N
	// versions (including the live version) newer than this version of the
	// object.
	NumNewerVersions int64
}

func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
	if b == nil {
		return nil, nil
	}
	rp, err := toRetentionPolicy(b.RetentionPolicy)
	if err != nil {
		return nil, err
	}
	bucket := &BucketAttrs{
		Name:              b.Name,
		Location:          b.Location,
		MetaGeneration:    b.Metageneration,
		StorageClass:      b.StorageClass,
		Created:           convertTime(b.TimeCreated),
		VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
		Labels:            b.Labels,
		RequesterPays:     b.Billing != nil && b.Billing.RequesterPays,
		Lifecycle:         toLifecycle(b.Lifecycle),
		RetentionPolicy:   rp,
		CORS:              toCORS(b.Cors),
	}
	acl := make([]ACLRule, len(b.Acl))
	for i, rule := range b.Acl {
		acl[i] = ACLRule{
			Entity: ACLEntity(rule.Entity),
			Role:   ACLRole(rule.Role),
		}
	}
	bucket.ACL = acl
	objACL := make([]ACLRule, len(b.DefaultObjectAcl))
	for i, rule := range b.DefaultObjectAcl {
		objACL[i] = ACLRule{
			Entity: ACLEntity(rule.Entity),
			Role:   ACLRole(rule.Role),
		}
	}
	bucket.DefaultObjectACL = objACL
	return bucket, nil
}

// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
	var acl []*raw.BucketAccessControl
	if len(b.ACL) > 0 {
		acl = make([]*raw.BucketAccessControl, len(b.ACL))
		for i, rule := range b.ACL {
			acl[i] = &raw.BucketAccessControl{
				Entity: string(rule.Entity),
				Role:   string(rule.Role),
			}
		}
	}
	dACL := toRawObjectACL(b.DefaultObjectACL)
	// Copy label map.
	var labels map[string]string
	if len(b.Labels) > 0 {
		labels = make(map[string]string, len(b.Labels))
		for k, v := range b.Labels {
			labels[k] = v
		}
	}
	// Ignore VersioningEnabled if it is false. This is OK because
	// we only call this method when creating a bucket, and by default
	// new buckets have versioning off.
	var v *raw.BucketVersioning
	if b.VersioningEnabled {
		v = &raw.BucketVersioning{Enabled: true}
	}
	var bb *raw.BucketBilling
	if b.RequesterPays {
		bb = &raw.BucketBilling{RequesterPays: true}
	}
	return &raw.Bucket{
		Name:             b.Name,
		DefaultObjectAcl: dACL,
		Location:         b.Location,
		StorageClass:     b.StorageClass,
		Acl:              acl,
		Versioning:       v,
		Labels:           labels,
		Billing:          bb,
		Lifecycle:        toRawLifecycle(b.Lifecycle),
		RetentionPolicy:  b.RetentionPolicy.toRawRetentionPolicy(),
		Cors:             toRawCORS(b.CORS),
	}
}

// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
	// MaxAge is the value to return in the Access-Control-Max-Age
	// header used in preflight responses.
	MaxAge time.Duration

	// Methods is the list of HTTP methods on which to include CORS response
	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
	// of methods, and means "any method".
	Methods []string

	// Origins is the list of Origins eligible to receive CORS response
	// headers. Note: "*" is permitted in the list of origins, and means
	// "any Origin".
	Origins []string

	// ResponseHeaders is the list of HTTP headers other than the simple
	// response headers to give permission for the user-agent to share
	// across domains.
	ResponseHeaders []string
}

// BucketAttrsToUpdate defines the attributes to update during an Update call.
type BucketAttrsToUpdate struct {
	// VersioningEnabled, if set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	// RetentionPolicy, if set, updates the retention policy of the bucket. Using
	// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
	//
	// This feature is in private alpha release. It is not currently available to
	// most customers. It might be changed in backwards-incompatible ways and is not
	// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy

	// CORS, if set, replaces the CORS configuration with a new configuration.
	// When an empty slice is provided, all CORS policies are removed; when nil
	// is provided, the value is ignored in the update.
	CORS []CORS

	setLabels    map[string]string
	deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified when ua is used
// in a call to Bucket.Update.
func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
	if ua.setLabels == nil {
		ua.setLabels = map[string]string{}
	}
	ua.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted when ua is used in a
// call to Bucket.Update.
func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
	if ua.deleteLabels == nil {
		ua.deleteLabels = map[string]bool{}
	}
	ua.deleteLabels[name] = true
}

func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
	rb := &raw.Bucket{}
	if ua.CORS != nil {
		rb.Cors = toRawCORS(ua.CORS)
		rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
	}
	if ua.RetentionPolicy != nil {
		if ua.RetentionPolicy.RetentionPeriod == 0 {
			rb.NullFields = append(rb.NullFields, "RetentionPolicy")
			rb.RetentionPolicy = nil
		} else {
			rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
		}
	}
	if ua.VersioningEnabled != nil {
		rb.Versioning = &raw.BucketVersioning{
			Enabled:         optional.ToBool(ua.VersioningEnabled),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if ua.RequesterPays != nil {
		rb.Billing = &raw.BucketBilling{
			RequesterPays:   optional.ToBool(ua.RequesterPays),
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.setLabels != nil || ua.deleteLabels != nil {
		rb.Labels = map[string]string{}
		for k, v := range ua.setLabels {
			rb.Labels[k] = v
		}
		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
		}
		for l := range ua.deleteLabels {
			rb.NullFields = append(rb.NullFields, "Labels."+l)
		}
	}
	return rb
}

// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
	b2 := *b
	b2.conds = &conds
	return &b2
}

// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints.
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}

func (c *BucketConditions) validate(method string) error {
	if *c == (BucketConditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}

// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. Calls with a user project will be billed to that
// project rather than to the bucket's owning project.
//
// A user project is required for all operations on Requester Pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
	b2 := *b
	b2.userProject = projectID
	b2.acl.userProject = projectID
	b2.defaultObjectACL.userProject = projectID
	return &b2
}

// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
	var metageneration int64
	if b.conds != nil {
		metageneration = b.conds.MetagenerationMatch
	}
	req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
	_, err := req.Context(ctx).Do()
	return err
}

// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}
	cval := reflect.ValueOf(call)
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}

func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
	if rp == nil {
		return nil
	}
	return &raw.BucketRetentionPolicy{
		RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
	}
}

func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
	if rp == nil {
		return nil, nil
	}
	t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
	if err != nil {
		return nil, err
	}
	return &RetentionPolicy{
		RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
		EffectiveTime:   t,
	}, nil
}

func toRawCORS(c []CORS) []*raw.BucketCors {
	var out []*raw.BucketCors
	for _, v := range c {
		out = append(out, &raw.BucketCors{
			MaxAgeSeconds:  int64(v.MaxAge / time.Second),
			Method:         v.Methods,
			Origin:         v.Origins,
			ResponseHeader: v.ResponseHeaders,
		})
	}
	return out
}

func toCORS(rc []*raw.BucketCors) []CORS {
	var out []CORS
	for _, v := range rc {
		out = append(out, CORS{
			MaxAge:          time.Duration(v.MaxAgeSeconds) * time.Second,
			Methods:         v.Method,
			Origins:         v.Origin,
			ResponseHeaders: v.ResponseHeader,
		})
	}
	return out
}

func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
	var rl raw.BucketLifecycle
	if len(l.Rules) == 0 {
		return nil
	}
	for _, r := range l.Rules {
		rr := &raw.BucketLifecycleRule{
			Action: &raw.BucketLifecycleRuleAction{
				Type:         r.Action.Type,
				StorageClass: r.Action.StorageClass,
			},
			Condition: &raw.BucketLifecycleRuleCondition{
				Age:                 r.Condition.AgeInDays,
				MatchesStorageClass: r.Condition.MatchesStorageClasses,
				NumNewerVersions:    r.Condition.NumNewerVersions,
			},
		}

		switch r.Condition.Liveness {
		case LiveAndArchived:
			rr.Condition.IsLive = nil
		case Live:
			rr.Condition.IsLive = googleapi.Bool(true)
		case Archived:
			rr.Condition.IsLive = googleapi.Bool(false)
		}

		if !r.Condition.CreatedBefore.IsZero() {
			rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
		}
		rl.Rule = append(rl.Rule, rr)
	}
	return &rl
}

func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.Rule {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.Action.Type,
				StorageClass: rr.Action.StorageClass,
			},
			Condition: LifecycleCondition{
				AgeInDays:             rr.Condition.Age,
				MatchesStorageClasses: rr.Condition.MatchesStorageClass,
				NumNewerVersions:      rr.Condition.NumNewerVersions,
			},
		}

		switch {
		case rr.Condition.IsLive == nil:
			r.Condition.Liveness = LiveAndArchived
		case *rr.Condition.IsLive:
			r.Condition.Liveness = Live
		default:
			r.Condition.Liveness = Archived
		}

		if rr.Condition.CreatedBefore != "" {
			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
		}
		l.Rules = append(l.Rules, r)
	}
	return l
}

// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
	it := &ObjectIterator{
		ctx:    ctx,
		bucket: b,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	if q != nil {
		it.query = *q
	}
	return it
}

// An ObjectIterator is an iterator over ObjectAttrs.
type ObjectIterator struct {
	ctx      context.Context
	bucket   *BucketHandle
	query    Query
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*ObjectAttrs
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
// have a non-empty Prefix field, and a zero value for all other fields. These
// represent prefixes.
func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
	req := it.bucket.c.raw.Objects.List(it.bucket.name)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Delimiter(it.query.Delimiter)
	req.Prefix(it.query.Prefix)
	req.Versions(it.query.Versions)
	req.PageToken(pageToken)
	if it.bucket.userProject != "" {
		req.UserProject(it.bucket.userProject)
	}
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Objects
	var err error
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
			err = ErrBucketNotExist
		}
		return "", err
	}
	for _, item := range resp.Items {
		it.items = append(it.items, newObject(item))
	}
	for _, prefix := range resp.Prefixes {
		it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
	}
	return resp.NextPageToken, nil
}

// TODO(jbd): Add storage.buckets.update.

// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
// are returned.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
	it := &BucketIterator{
		ctx:       ctx,
		client:    c,
		projectID: projectID,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.buckets) },
		func() interface{} { b := it.buckets; it.buckets = nil; return b })
	return it
}

// A BucketIterator is an iterator over BucketAttrs.
type BucketIterator struct {
	// Prefix restricts the iterator to buckets whose names begin with it.
	Prefix string

	ctx       context.Context
	client    *Client
	projectID string
	buckets   []*BucketAttrs
	pageInfo  *iterator.PageInfo
	nextFunc  func() error
}

// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
func (it *BucketIterator) Next() (*BucketAttrs, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	b := it.buckets[0]
	it.buckets = it.buckets[1:]
	return b, nil
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *BucketIterator) fetch(pageSize int, pageToken string) (_ string, err error) {
	req := it.client.raw.Buckets.List(it.projectID)
	setClientHeader(req.Header())
	req.Projection("full")
	req.Prefix(it.Prefix)
	req.PageToken(pageToken)
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var resp *raw.Buckets
	err = runWithRetry(it.ctx, func() error {
		resp, err = req.Context(it.ctx).Do()
		return err
	})
	if err != nil {
		return "", err
	}
	for _, item := range resp.Items {
		b, err := newBucket(item)
		if err != nil {
			return "", err
		}
		it.buckets = append(it.buckets, b)
	}
	return resp.NextPageToken, nil
}
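// Illustrative sketch (not part of this commit): creating a bucket and paging
// through its objects with the handles above. Project and bucket names are
// placeholders; iterator.Done comes from google.golang.org/api/iterator.
//
//	bkt := client.Bucket("my-bucket")
//	if err := bkt.Create(ctx, "my-project", &storage.BucketAttrs{Location: "US"}); err != nil {
//		// handle error
//	}
//	it := bkt.Objects(ctx, &storage.Query{Prefix: "logs/"})
//	for {
//		attrs, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// handle error
//		}
//		fmt.Println(attrs.Name)
//	}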
@ -0,0 +1,207 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"errors"
	"fmt"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
)

// CopierFrom creates a Copier that can copy src to dst.
// You can immediately call Run on the returned Copier, or
// you can configure it first.
//
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
// in which case the user project of src is billed.
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
	return &Copier{dst: dst, src: src}
}

// A Copier copies a source object to a destination.
type Copier struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Copier. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	// RewriteToken can be set before calling Run to resume a copy
	// operation. After Run returns a non-nil error, RewriteToken will
	// have been updated to contain the value needed to resume the copy.
	RewriteToken string

	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
	// operation. If ProgressFunc is not nil and copying requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
	// ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far and the total size in bytes of the source object.
	//
	// ProgressFunc is intended to make upload progress available to the
	// application. For example, the implementation of ProgressFunc may update
	// a progress bar in the application's UI, or log the result of
	// float64(copiedBytes)/float64(totalBytes).
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(copiedBytes, totalBytes uint64)

	dst, src *ObjectHandle
}

// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (_ *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := c.src.validate(); err != nil {
		return nil, err
	}
	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	// Convert destination attributes to raw form, omitting the bucket.
	// If the bucket is included but name or content-type aren't, the service
	// returns a 400 with "Required" as the only message. Omitting the bucket
	// does not cause any problems.
	rawObject := c.ObjectAttrs.toRawObject("")
	for {
		res, err := c.callRewrite(ctx, rawObject)
		if err != nil {
			return nil, err
		}
		if c.ProgressFunc != nil {
			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
		}
		if res.Done { // Finished successfully.
			return newObject(res.Resource), nil
		}
	}
}

func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)

	call.Context(ctx).Projection("full")
	if c.RewriteToken != "" {
		call.RewriteToken(c.RewriteToken)
	}
	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	} else if c.src.userProject != "" {
		call.UserProject(c.src.userProject)
	}
	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
		return nil, err
	}
	var res *raw.RewriteResponse
	var err error
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	c.RewriteToken = res.RewriteToken
	return res, nil
}

// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
//
// The encryption key for the destination object will be used to decrypt all
// source objects and encrypt the destination object. It is an error
// to specify an encryption key for any of the source objects.
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
	return &Composer{dst: dst, srcs: srcs}
}

// A Composer composes source objects into a destination object.
//
// For Requester Pays buckets, the user project of dst is billed.
type Composer struct {
	// ObjectAttrs are optional attributes to set on the destination object.
	// Any attributes must be initialized before any calls on the Composer. Nil
	// or zero-valued attributes are ignored.
	ObjectAttrs

	dst  *ObjectHandle
	srcs []*ObjectHandle
}

// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (_ *ObjectAttrs, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := c.dst.validate(); err != nil {
		return nil, err
	}
	if len(c.srcs) == 0 {
		return nil, errors.New("storage: at least one source object must be specified")
	}

	req := &raw.ComposeRequest{}
	// Compose requires a non-empty Destination, so we always set it,
	// even if the caller-provided ObjectAttrs is the zero value.
	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
	for _, src := range c.srcs {
		if err := src.validate(); err != nil {
			return nil, err
		}
		if src.bucket != c.dst.bucket {
			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
		}
		if src.encryptionKey != nil {
			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
		}
		srcObj := &raw.ComposeRequestSourceObjects{
			Name: src.object,
		}
		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
			return nil, err
		}
		req.SourceObjects = append(req.SourceObjects, srcObj)
	}

	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
		return nil, err
	}
	if c.dst.userProject != "" {
		call.UserProject(c.dst.userProject)
	}
	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
		return nil, err
	}
	var obj *raw.Object
	setClientHeader(call.Header())
	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
	if err != nil {
		return nil, err
	}
	return newObject(obj), nil
}
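// Illustrative sketch (not part of the vendored source): a copy with progress
// reporting via the Copier above. The bucket and object names are placeholders.
//
//	src := client.Bucket("src-bucket").Object("in")
//	dst := client.Bucket("dst-bucket").Object("out")
//	c := dst.CopierFrom(src)
//	c.ProgressFunc = func(copied, total uint64) {
//		fmt.Printf("copied %d of %d bytes\n", copied, total)
//	}
//	attrs, err := c.Run(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Println(attrs.Name)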
@ -0,0 +1,167 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package storage provides an easy way to work with Google Cloud Storage.
Google Cloud Storage stores data in named objects, which are grouped into buckets.

More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.

All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.


Creating a Client

To start working with this package, create a client:

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

The client will use your default application credentials.

If you only wish to access public data, you can create
an unauthenticated client with

	client, err := storage.NewClient(ctx, option.WithoutAuthentication())

Buckets

A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:

	bkt := client.Bucket(bucketName)

A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call Create on the handle:

	if err := bkt.Create(ctx, projectID, nil); err != nil {
		// TODO: Handle error.
	}

Note that although buckets are associated with projects, bucket names are
global across all projects.

Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:

	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
		attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)

Objects

An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:

	obj := bkt.Object("data")
	// Write something to obj.
	// w implements io.Writer.
	w := obj.NewWriter(ctx)
	// Write some text to obj. This will either create the object or overwrite whatever is there already.
	if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
		// TODO: Handle error.
	}
	// Close, just like writing a file.
	if err := w.Close(); err != nil {
		// TODO: Handle error.
	}

	// Read it back.
	r, err := obj.NewReader(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		// TODO: Handle error.
	}
	// Prints "This object contains text."

Objects also have attributes, which you can fetch with Attrs:

	objAttrs, err := obj.Attrs(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Printf("object %s has size %d and can be read using %s\n",
		objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)

ACLs

Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
are suitable for fine-grained control, but you may prefer using IAM to control
access at the project level (see
https://cloud.google.com/storage/docs/access-control/iam).

To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:

	acls, err := obj.ACL().List(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for _, rule := range acls {
		fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
	}

You can also set and delete ACLs.

Conditions

Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
metadata changes. Conditions let you check these values before an operation;
the operation only executes if the conditions match. You can use conditions to
prevent race conditions in read-modify-write operations.

For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:

	w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
	// Proceed with writing as above.

Signed URLs

You can obtain a URL that lets anyone read or write an object for a limited time.
You don't need to create a client to do this. See the documentation of
SignedURL for details.

	url, err := storage.SignedURL(bucketName, "shared-object", opts)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(url)

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"
@ -0,0 +1,30 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.10

package storage

import "google.golang.org/api/googleapi"

func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	default:
		return false
	}
}
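// Illustrative sketch (not part of the vendored source): how shouldRetry above
// classifies errors. Throttling and server errors are retried; anything else,
// including client errors, is returned immediately.
//
//	shouldRetry(&googleapi.Error{Code: 429}) // true: rate-limited
//	shouldRetry(&googleapi.Error{Code: 503}) // true: server error
//	shouldRetry(&googleapi.Error{Code: 404}) // false: not found
//	shouldRetry(io.EOF)                      // false: not a googleapi.Error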
@ -0,0 +1,26 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build go1.7

package storage

import (
	"context"
	"net/http"
)

func withContext(r *http.Request, ctx context.Context) *http.Request {
	return r.WithContext(ctx)
}
@ -0,0 +1,129 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"cloud.google.com/go/iam"
	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
	iampb "google.golang.org/genproto/googleapis/iam/v1"
)

// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
	return iam.InternalNewHandleClient(&iamClient{
		raw:         b.c.raw,
		userProject: b.userProject,
	}, b.name)
}

// iamClient implements the iam.client interface.
type iamClient struct {
	raw         *raw.Service
	userProject string
}

func (c *iamClient) Get(ctx context.Context, resource string) (_ *iampb.Policy, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
	defer func() { trace.EndSpan(ctx, err) }()

	call := c.raw.Buckets.GetIamPolicy(resource)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var rp *raw.Policy
	err = runWithRetry(ctx, func() error {
		rp, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return iamFromStoragePolicy(rp), nil
}

func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
	defer func() { trace.EndSpan(ctx, err) }()

	rp := iamToStoragePolicy(p)
	call := c.raw.Buckets.SetIamPolicy(resource, rp)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	return runWithRetry(ctx, func() error {
		_, err := call.Context(ctx).Do()
		return err
	})
}

func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (_ []string, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
	defer func() { trace.EndSpan(ctx, err) }()

	call := c.raw.Buckets.TestIamPermissions(resource, perms)
	setClientHeader(call.Header())
	if c.userProject != "" {
		call.UserProject(c.userProject)
	}
	var res *raw.TestIamPermissionsResponse
	err = runWithRetry(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return res.Permissions, nil
}

func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
	return &raw.Policy{
		Bindings: iamToStorageBindings(ip.Bindings),
		Etag:     string(ip.Etag),
	}
}

func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
	var rbs []*raw.PolicyBindings
	for _, ib := range ibs {
		rbs = append(rbs, &raw.PolicyBindings{
			Role:    ib.Role,
			Members: ib.Members,
		})
	}
	return rbs
}

func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
	return &iampb.Policy{
		Bindings: iamFromStorageBindings(rp.Bindings),
		Etag:     []byte(rp.Etag),
	}
}

func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
	var ibs []*iampb.Binding
	for _, rb := range rbs {
		ibs = append(ibs, &iampb.Binding{
			Role:    rb.Role,
			Members: rb.Members,
		})
	}
	return ibs
}
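// Illustrative sketch (not part of the vendored source): reading and updating a
// bucket policy through the iam.Handle returned above. Policy, SetPolicy, and
// Policy.Add are assumed from the cloud.google.com/go/iam package; the member
// and role strings are placeholders.
//
//	handle := client.Bucket(bucketName).IAM()
//	policy, err := handle.Policy(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	policy.Add("group:readers@example.com", "roles/storage.objectViewer")
//	if err := handle.SetPolicy(ctx, policy); err != nil {
//		// TODO: Handle error.
//	}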
@ -0,0 +1,36 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
)

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
func runWithRetry(ctx context.Context, call func() error) error {
	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		if shouldRetry(err) {
			return false, nil
		}
		return true, err
	})
}
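// Illustrative sketch (not part of the vendored source): the stop/continue
// contract runWithRetry relies on, restated with a plain sleep loop instead of
// internal.Retry and gax.Backoff. isRetryable stands in for shouldRetry.
//
//	func retry(ctx context.Context, attempts int, isRetryable func(error) bool, call func() error) error {
//		var err error
//		for i := 0; i < attempts; i++ {
//			if err = call(); err == nil || !isRetryable(err) {
//				return err // success, or an error not worth retrying
//			}
//			select {
//			case <-ctx.Done():
//				return ctx.Err()
//			case <-time.After(time.Duration(1<<uint(i)) * 100 * time.Millisecond):
//				// crude exponential backoff between attempts
//			}
//		}
//		return err
//	}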
@ -0,0 +1,40 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.10

package storage

import (
	"net/url"
	"strings"

	"google.golang.org/api/googleapi"
)

func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry on REFUSED_STREAM.
		// Unfortunately the error type is unexported, so we resort to string
		// matching.
		return strings.Contains(e.Error(), "REFUSED_STREAM")
	default:
		return false
	}
}
@ -0,0 +1,26 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.7

package storage

import (
	"net/http"
)

func withContext(r *http.Request, _ interface{}) *http.Request {
	// In Go 1.6 and below, ignore the context.
	return r
}
@ -0,0 +1,188 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"errors"
	"fmt"
	"regexp"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	raw "google.golang.org/api/storage/v1"
)

// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
	// The ID of the notification.
	ID string

	// The ID of the topic to which this subscription publishes.
	TopicID string

	// The ID of the project to which the topic belongs.
	TopicProjectID string

	// Only send notifications about listed event types. If empty, send notifications
	// for all event types.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
	EventTypes []string

	// If present, only apply this notification configuration to object names that
	// begin with this prefix.
	ObjectNamePrefix string

	// An optional list of additional attributes to attach to each Cloud PubSub
	// message published for this notification subscription.
	CustomAttributes map[string]string

	// The contents of the message payload.
	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
	PayloadFormat string
}

// Values for Notification.PayloadFormat.
const (
	// Send no payload with notification messages.
	NoPayload = "NONE"

	// Send object metadata as JSON with notification messages.
	JSONPayload = "JSON_API_V1"
)

// Values for Notification.EventTypes.
const (
	// Event that occurs when an object is successfully created.
	ObjectFinalizeEvent = "OBJECT_FINALIZE"

	// Event that occurs when the metadata of an existing object changes.
	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"

	// Event that occurs when an object is permanently deleted.
	ObjectDeleteEvent = "OBJECT_DELETE"

	// Event that occurs when the live version of an object becomes an
	// archived version.
	ObjectArchiveEvent = "OBJECT_ARCHIVE"
)

func toNotification(rn *raw.Notification) *Notification {
	n := &Notification{
		ID:               rn.Id,
		EventTypes:       rn.EventTypes,
		ObjectNamePrefix: rn.ObjectNamePrefix,
		CustomAttributes: rn.CustomAttributes,
		PayloadFormat:    rn.PayloadFormat,
	}
	n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
	return n
}

var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")

// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
	matches := topicRE.FindStringSubmatch(nt)
	if matches == nil {
		return "?", "?"
	}
	return matches[1], matches[2]
}

func toRawNotification(n *Notification) *raw.Notification {
	return &raw.Notification{
		Id: n.ID,
		Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
			n.TopicProjectID, n.TopicID),
		EventTypes:       n.EventTypes,
		ObjectNamePrefix: n.ObjectNamePrefix,
		CustomAttributes: n.CustomAttributes,
		PayloadFormat:    string(n.PayloadFormat),
	}
}

// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (_ *Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
	defer func() { trace.EndSpan(ctx, err) }()

	if n.ID != "" {
		return nil, errors.New("storage: AddNotification: ID must not be set")
	}
	if n.TopicProjectID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicProjectID")
	}
	if n.TopicID == "" {
		return nil, errors.New("storage: AddNotification: missing TopicID")
	}
	call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	rn, err := call.Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return toNotification(rn), nil
}

// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (_ map[string]*Notification, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
	defer func() { trace.EndSpan(ctx, err) }()

	call := b.c.raw.Notifications.List(b.name)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	var res *raw.Notifications
	err = runWithRetry(ctx, func() error {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return notificationsToMap(res.Items), nil
}

func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
	m := map[string]*Notification{}
	for _, rn := range rns {
		m[rn.Id] = toNotification(rn)
	}
	return m
}

// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
	defer func() { trace.EndSpan(ctx, err) }()

	call := b.c.raw.Notifications.Delete(b.name, id)
	setClientHeader(call.Header())
	if b.userProject != "" {
		call.UserProject(b.userProject)
	}
	return call.Context(ctx).Do()
}
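// Illustrative sketch (not part of the vendored source): registering a
// notification with the API above. projectID and the topic name are
// placeholders; the topic must already exist in Cloud Pub/Sub.
//
//	n, err := client.Bucket(bucketName).AddNotification(ctx, &Notification{
//		TopicProjectID: projectID,
//		TopicID:        "my-topic",
//		PayloadFormat:  JSONPayload,
//		EventTypes:     []string{ObjectFinalizeEvent},
//	})
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Println("created notification", n.ID)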
@ -0,0 +1,245 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"errors"
	"fmt"
	"hash/crc32"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"

	"cloud.google.com/go/internal/trace"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
)

var crc32cTable = crc32.MakeTable(crc32.Castagnoli)

// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
	return o.NewRangeReader(ctx, 0, -1)
}

// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (_ *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
	defer func() { trace.EndSpan(ctx, err) }()

	if err := o.validate(); err != nil {
		return nil, err
	}
	if offset < 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}
	u := &url.URL{
		Scheme:   "https",
		Host:     "storage.googleapis.com",
		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
		RawQuery: conditionsQuery(o.gen, o.conds),
	}
	verb := "GET"
	if length == 0 {
		verb = "HEAD"
	}
	req, err := http.NewRequest(verb, u.String(), nil)
	if err != nil {
		return nil, err
	}
	req = withContext(req, ctx)
	if length < 0 && offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	} else if length > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	}
	if o.userProject != "" {
		req.Header.Set("X-Goog-User-Project", o.userProject)
	}
	if o.readCompressed {
		req.Header.Set("Accept-Encoding", "gzip")
	}
	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
		return nil, err
	}
	var res *http.Response
	err = runWithRetry(ctx, func() error {
		res, err = o.c.hc.Do(req)
		if err != nil {
			return err
		}
		if res.StatusCode == http.StatusNotFound {
			res.Body.Close()
			return ErrObjectNotExist
		}
		if res.StatusCode < 200 || res.StatusCode > 299 {
			body, _ := ioutil.ReadAll(res.Body)
			res.Body.Close()
			return &googleapi.Error{
				Code:   res.StatusCode,
				Header: res.Header,
				Body:   string(body),
			}
		}
		if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
			res.Body.Close()
			return errors.New("storage: partial request not satisfied")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	var size int64 // total size of object, even if a range was requested.
	if res.StatusCode == http.StatusPartialContent {
		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
	} else {
		size = res.ContentLength
	}

	remain := res.ContentLength
	body := res.Body
	if length == 0 {
		remain = 0
		body.Close()
		body = emptyBody
	}
	var (
		checkCRC bool
		crc      uint32
	)
	// Even if there is a CRC header, we can't compute the hash on partial data.
	if remain == size {
		crc, checkCRC = parseCRC32c(res)
	}
	return &Reader{
		body:            body,
		size:            size,
		remain:          remain,
		contentType:     res.Header.Get("Content-Type"),
		contentEncoding: res.Header.Get("Content-Encoding"),
		cacheControl:    res.Header.Get("Cache-Control"),
		wantCRC:         crc,
		checkCRC:        checkCRC,
	}, nil
}

func parseCRC32c(res *http.Response) (uint32, bool) {
	const prefix = "crc32c="
	for _, spec := range res.Header["X-Goog-Hash"] {
		if strings.HasPrefix(spec, prefix) {
			c, err := decodeUint32(spec[len(prefix):])
			if err == nil {
				return c, true
			}
		}
	}
	return 0, false
}

var emptyBody = ioutil.NopCloser(strings.NewReader(""))

// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
	body            io.ReadCloser
	remain, size    int64
	contentType     string
	contentEncoding string
	cacheControl    string
	checkCRC        bool   // should we check the CRC?
	wantCRC         uint32 // the CRC32c value the server sent in the header
	gotCRC          uint32 // running crc
	checkedCRC      bool   // did we check the CRC? (For tests.)
}

// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
	return r.body.Close()
}

func (r *Reader) Read(p []byte) (int, error) {
	n, err := r.body.Read(p)
	if r.remain != -1 {
		r.remain -= int64(n)
	}
	if r.checkCRC {
		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
		// Check CRC here. It would be natural to check it in Close, but
		// everybody defers Close on the assumption that it doesn't return
		// anything worth looking at.
		if r.remain == 0 { // Only check if we have Content-Length.
			r.checkedCRC = true
			if r.gotCRC != r.wantCRC {
				return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
					r.gotCRC, r.wantCRC)
			}
		}
	}
	return n, err
}

// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
func (r *Reader) Size() int64 {
	return r.size
}

// Remain returns the number of bytes left to read, or -1 if unknown.
func (r *Reader) Remain() int64 {
	return r.remain
}

// ContentType returns the content type of the object.
func (r *Reader) ContentType() string {
	return r.contentType
}

// ContentEncoding returns the content encoding of the object.
func (r *Reader) ContentEncoding() string {
	return r.contentEncoding
}

// CacheControl returns the cache control of the object.
func (r *Reader) CacheControl() string {
	return r.cacheControl
}
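// Illustrative sketch (not part of the vendored source): reading only the first
// 64 bytes of an object with the range reader above. As the code shows, the CRC
// check is skipped for partial reads, since the hash covers the whole object.
//
//	r, err := obj.NewRangeReader(ctx, 0, 64)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	defer r.Close()
//	slurp, err := ioutil.ReadAll(r)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Printf("first %d bytes: %q\n", len(slurp), slurp)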
@ -0,0 +1,218 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package storage

import (
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"sync"
	"unicode/utf8"

	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	raw "google.golang.org/api/storage/v1"
)

// A Writer writes a Cloud Storage object.
type Writer struct {
	// ObjectAttrs are optional attributes to set on the object. Any attributes
	// must be initialized before the first Write call. Nil or zero-valued
	// attributes are ignored.
	ObjectAttrs

	// SendCRC32C specifies whether to transmit a CRC32C field. It should be
	// set to true in addition to setting the Writer's CRC32C field, because
	// zero is a valid CRC and normally a zero would not be transmitted.
	// If a CRC32C is sent, and the data written does not match the checksum,
	// the write will be rejected.
	SendCRC32C bool

	// ChunkSize controls the maximum number of bytes of the object that the
	// Writer will attempt to send to the server in a single request. Objects
	// smaller than the size will be sent in a single request, while larger
	// objects will be split over multiple requests. The size will be rounded up
	// to the nearest multiple of 256K. If zero, chunking will be disabled and
	// the object will be uploaded in a single request.
	//
	// ChunkSize will default to a reasonable value. If you perform many concurrent
	// writes of small objects, you may wish to set ChunkSize to a value that matches
	// your objects' sizes to avoid consuming large amounts of memory.
	//
	// ChunkSize must be set before the first Write call.
	ChunkSize int

	// ProgressFunc can be used to monitor the progress of a large write
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
	// then ProgressFunc will be invoked after each call with the number of bytes of
	// content copied so far.
	//
	// ProgressFunc should return quickly without blocking.
	ProgressFunc func(int64)

	ctx context.Context
	o   *ObjectHandle

	opened bool
	pw     *io.PipeWriter

	donec chan struct{} // closed after err and obj are set.
	obj   *ObjectAttrs

	mu  sync.Mutex
	err error
}

func (w *Writer) open() error {
	attrs := w.ObjectAttrs
	// Check the developer didn't change the object Name (this is unfortunate, but
	// we don't want to store an object under the wrong name).
	if attrs.Name != w.o.object {
		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
	}
	if !utf8.ValidString(attrs.Name) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
	}
	pr, pw := io.Pipe()
	w.pw = pw
	w.opened = true

	if w.ChunkSize < 0 {
		return errors.New("storage: Writer.ChunkSize must be non-negative")
	}
	mediaOpts := []googleapi.MediaOption{
		googleapi.ChunkSize(w.ChunkSize),
	}
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}

	go func() {
		defer close(w.donec)

		rawObj := attrs.toRawObject(w.o.bucket)
		if w.SendCRC32C {
			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
		}
		if w.MD5 != nil {
			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
		}
		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
			Media(pr, mediaOpts...).
			Projection("full").
			Context(w.ctx)
		if w.ProgressFunc != nil {
			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
		}
		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		var resp *raw.Object
		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
		if err == nil {
			if w.o.userProject != "" {
				call.UserProject(w.o.userProject)
			}
			setClientHeader(call.Header())
			// If the chunk size is zero, then no chunking is done on the Reader,
			// which means we cannot retry: the first call will read the data, and if
			// it fails, there is no way to re-read.
			if w.ChunkSize == 0 {
				resp, err = call.Do()
			} else {
				// We will only retry here if the initial POST, which obtains a URI for
				// the resumable upload, fails with a retryable error. The upload itself
				// has its own retry logic.
				err = runWithRetry(w.ctx, func() error {
					var err2 error
					resp, err2 = call.Do()
					return err2
				})
			}
		}
		if err != nil {
			w.mu.Lock()
			w.err = err
			w.mu.Unlock()
			pr.CloseWithError(err)
			return
		}
		w.obj = newObject(resp)
	}()
	return nil
}

// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
	w.mu.Lock()
	werr := w.err
	w.mu.Unlock()
	if werr != nil {
		return 0, werr
	}
	if !w.opened {
		if err := w.open(); err != nil {
			return 0, err
		}
	}
	return w.pw.Write(p)
}

// Close completes the write operation and flushes any buffered data.
// If Close doesn't return an error, metadata about the written object
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
	if !w.opened {
		if err := w.open(); err != nil {
			return err
		}
	}
	if err := w.pw.Close(); err != nil {
		return err
	}
	<-w.donec
	w.mu.Lock()
	defer w.mu.Unlock()
	return w.err
}

// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
	if !w.opened {
		return nil
	}
	return w.pw.CloseWithError(err)
}

// Attrs returns metadata about a successfully-written object.
// It's only valid to call it after Close returns nil.
func (w *Writer) Attrs() *ObjectAttrs {
	return w.obj
}
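// Illustrative sketch (not part of the vendored source): uploading with a
// client-computed CRC32C so the service rejects corrupted payloads. The CRC32C
// field comes from the embedded ObjectAttrs, as used in open() above.
//
//	data := []byte("hello world")
//	w := obj.NewWriter(ctx)
//	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
//	w.SendCRC32C = true // required: a zero CRC would otherwise not be sent
//	if _, err := w.Write(data); err != nil {
//		// TODO: Handle error.
//	}
//	if err := w.Close(); err != nil {
//		// TODO: Handle error.
//	}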
@ -0,0 +1,92 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	if format == OverlayWhiteoutFormat {
		return overlayWhiteoutConverter{}
	}
	return nil
}

type overlayWhiteoutConverter struct{}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return nil, err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			if hdr.Xattrs != nil {
				delete(hdr.Xattrs, "trusted.overlay.opaque")
			}

			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return
}

func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
		// don't write the file itself
		return false, err
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
			return false, err
		}
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
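// Illustrative sketch (not part of the vendored source): the AUFS naming scheme
// the converter above emits. Assuming WhiteoutPrefix is ".wh." and
// WhiteoutOpaqueDir is ".wh..wh..opq" (their values defined elsewhere in this
// package), a deleted file and an opaque directory map as:
//
//	dir, f := filepath.Split("layer/app.log")          // a deleted file
//	fmt.Println(filepath.Join(dir, ".wh."+f))          // layer/.wh.app.log
//	fmt.Println(filepath.Join("layer", ".wh..wh..opq")) // opaque-dir marker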
@ -0,0 +1,7 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
@ -0,0 +1,114 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/system"
	rsystem "github.com/opencontainers/runc/libcontainer/system"
	"golang.org/x/sys/unix"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		// Currently go does not fill in the major/minors
		if s.Mode&unix.S_IFBLK != 0 ||
			s.Mode&unix.S_IFCHR != 0 {
			hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
			hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
		}
	}

	return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if ok {
		inode = s.Ino
	}

	return
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	if rsystem.RunningInUserNS() {
		// cannot create a device if running in user namespace
		return nil
	}

	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= unix.S_IFBLK
	case tar.TypeChar:
		mode |= unix.S_IFCHR
	case tar.TypeFifo:
		mode |= unix.S_IFIFO
	}

	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
@@ -0,0 +1,77 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "fmt"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
    return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
    return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath converts the platform-specific relative path p
// to the canonical posix-style path used for tar archival.
func CanonicalTarNameForPath(p string) (string, error) {
    // windows: convert windows style relative path with backslashes
    // into forward slashes. Since windows does not allow '/' or '\'
    // in file names, it is mostly safe to replace; however, we must
    // check just in case
    if strings.Contains(p, "/") {
        return "", fmt.Errorf("Windows path contains forward slash: %s", p)
    }
    return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
    //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
    permPart := perm & os.ModePerm
    noPermPart := perm &^ os.ModePerm
    // Add the x bit: make everything +x from windows
    permPart |= 0111
    permPart &= 0755

    return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
    // do nothing. no notion of Rdev, Nlink in stat on Windows
    return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
    // do nothing. no notion of Inode in stat on Windows
    return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
    return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
    return nil
}

func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
    // no notion of file ownership mapping yet on Windows
    return idtools.IDPair{UID: 0, GID: 0}, nil
}
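A quick illustration (not part of the diff) of what chmodTarEntry does to Windows permissions; the function body is copied from the hunk above, the main is only a demo:

package main

import (
    "fmt"
    "os"
)

// chmodTarEntry keeps any non-permission bits, forces the execute bits on,
// and masks the result to 0755, matching the Windows variant above.
func chmodTarEntry(perm os.FileMode) os.FileMode {
    permPart := perm & os.ModePerm
    noPermPart := perm &^ os.ModePerm
    permPart |= 0111
    permPart &= 0755
    return noPermPart | permPart
}

func main() {
    fmt.Println(chmodTarEntry(0644))              // -rwxr-xr-x
    fmt.Println(chmodTarEntry(os.ModeDir | 0700)) // drwx--x--x
}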
@@ -0,0 +1,441 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "syscall"
    "time"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/pools"
    "github.com/docker/docker/pkg/system"
    "github.com/sirupsen/logrus"
)

// ChangeType represents the change type.
type ChangeType int

const (
    // ChangeModify represents the modify operation.
    ChangeModify = iota
    // ChangeAdd represents the add operation.
    ChangeAdd
    // ChangeDelete represents the delete operation.
    ChangeDelete
)

func (c ChangeType) String() string {
    switch c {
    case ChangeModify:
        return "C"
    case ChangeAdd:
        return "A"
    case ChangeDelete:
        return "D"
    }
    return ""
}

// Change represents a change. It wraps the change type and path.
// It describes the changes of files in the path with respect to the
// parent layers. The change could be modify, add, or delete.
// This is used for layer diffs.
type Change struct {
    Path string
    Kind ChangeType
}

func (change *Change) String() string {
    return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// GNU tar and the Go tar writer don't have sub-second mtime precision,
// which is problematic when we apply changes via tar files. We handle this
// by comparing for exact times, *or* for the same second count where either
// a or b has exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
    return a == b ||
        (a.Unix() == b.Unix() &&
            (a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
    return a.Sec == b.Sec &&
        (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
    return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}

func aufsMetadataSkip(path string) (skip bool, err error) {
    skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
    if err != nil {
        skip = true
    }
    return
}

func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
    f := filepath.Base(path)

    // If there is a whiteout, then the file was removed
    if strings.HasPrefix(f, WhiteoutPrefix) {
        originalFile := f[len(WhiteoutPrefix):]
        return filepath.Join(filepath.Dir(path), originalFile), nil
    }

    return "", nil
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)

func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
    var (
        changes     []Change
        changedDirs = make(map[string]struct{})
    )

    err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        // Rebase path
        path, err = filepath.Rel(rw, path)
        if err != nil {
            return err
        }

        // As this runs on the daemon side, file paths are OS specific.
        path = filepath.Join(string(os.PathSeparator), path)

        // Skip root
        if path == string(os.PathSeparator) {
            return nil
        }

        if sc != nil {
            if skip, err := sc(path); skip {
                return err
            }
        }

        change := Change{
            Path: path,
        }

        deletedFile, err := dc(rw, path, f)
        if err != nil {
            return err
        }

        // Find out what kind of modification happened
        if deletedFile != "" {
            change.Path = deletedFile
            change.Kind = ChangeDelete
        } else {
            // Otherwise, the file was added
            change.Kind = ChangeAdd

            // ...Unless it already existed in a top layer, in which case, it's a modification
            for _, layer := range layers {
                stat, err := os.Stat(filepath.Join(layer, path))
                if err != nil && !os.IsNotExist(err) {
                    return err
                }
                if err == nil {
                    // The file existed in the top layer, so that's a modification

                    // However, if it's a directory, maybe it wasn't actually modified.
                    // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
                    if stat.IsDir() && f.IsDir() {
                        if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
                            // Both directories are the same, don't record the change
                            return nil
                        }
                    }
                    change.Kind = ChangeModify
                    break
                }
            }
        }

        // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
        // This block is here to ensure the change is recorded even if the
        // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
        // Check https://github.com/docker/docker/pull/13590 for details.
        if f.IsDir() {
            changedDirs[path] = struct{}{}
        }
        if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
            parent := filepath.Dir(path)
            if _, ok := changedDirs[parent]; !ok && parent != "/" {
                changes = append(changes, Change{Path: parent, Kind: ChangeModify})
                changedDirs[parent] = struct{}{}
            }
        }

        // Record change
        changes = append(changes, change)
        return nil
    })
    if err != nil && !os.IsNotExist(err) {
        return nil, err
    }
    return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
    parent     *FileInfo
    name       string
    stat       *system.StatT
    children   map[string]*FileInfo
    capability []byte
    added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
    // As this runs on the daemon side, file paths are OS specific.
    parent := info
    if path == string(os.PathSeparator) {
        return info
    }

    pathElements := strings.Split(path, string(os.PathSeparator))
    for _, elem := range pathElements {
        if elem != "" {
            child := parent.children[elem]
            if child == nil {
                return nil
            }
            parent = child
        }
    }
    return parent
}

func (info *FileInfo) path() string {
    if info.parent == nil {
        // As this runs on the daemon side, file paths are OS specific.
        return string(os.PathSeparator)
    }
    return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
    sizeAtEntry := len(*changes)

    if oldInfo == nil {
        // add
        change := Change{
            Path: info.path(),
            Kind: ChangeAdd,
        }
        *changes = append(*changes, change)
        info.added = true
    }

    // We make a copy so we can modify it to detect additions
    // also, we only recurse on the old dir if the new info is a directory
    // otherwise any previous delete/change is considered recursive
    oldChildren := make(map[string]*FileInfo)
    if oldInfo != nil && info.isDir() {
        for k, v := range oldInfo.children {
            oldChildren[k] = v
        }
    }

    for name, newChild := range info.children {
        oldChild := oldChildren[name]
        if oldChild != nil {
            // change?
            oldStat := oldChild.stat
            newStat := newChild.stat
            // Note: We can't compare inode or ctime or blocksize here, because these change
            // when copying a file into a container. However, that is not generally a problem
            // because any content change will change mtime, and any status change should
            // be visible when actually comparing the stat fields. The only time this
            // breaks down is if some code intentionally hides a change by setting
            // back mtime
            if statDifferent(oldStat, newStat) ||
                !bytes.Equal(oldChild.capability, newChild.capability) {
                change := Change{
                    Path: newChild.path(),
                    Kind: ChangeModify,
                }
                *changes = append(*changes, change)
                newChild.added = true
            }

            // Remove from copy so we can detect deletions
            delete(oldChildren, name)
        }

        newChild.addChanges(oldChild, changes)
    }
    for _, oldChild := range oldChildren {
        // delete
        change := Change{
            Path: oldChild.path(),
            Kind: ChangeDelete,
        }
        *changes = append(*changes, change)
    }

    // If there were changes inside this directory, we need to add it, even if the directory
    // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
    // As this runs on the daemon side, file paths are OS specific.
    if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
        change := Change{
            Path: info.path(),
            Kind: ChangeModify,
        }
        // Let's insert the directory entry before the recently added entries located inside this dir
        *changes = append(*changes, change) // just to resize the slice, will be overwritten
        copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
        (*changes)[sizeAtEntry] = change
    }
}

// Changes returns the list of changes between info and oldInfo.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
    var changes []Change

    info.addChanges(oldInfo, &changes)

    return changes
}

func newRootFileInfo() *FileInfo {
    // As this runs on the daemon side, file paths are OS specific.
    root := &FileInfo{
        name:     string(os.PathSeparator),
        children: make(map[string]*FileInfo),
    }
    return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
    var (
        oldRoot, newRoot *FileInfo
    )
    if oldDir == "" {
        emptyDir, err := ioutil.TempDir("", "empty")
        if err != nil {
            return nil, err
        }
        defer os.Remove(emptyDir)
        oldDir = emptyDir
    }
    oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
    if err != nil {
        return nil, err
    }

    return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
    var (
        size int64
        sf   = make(map[uint64]struct{})
    )
    for _, change := range changes {
        if change.Kind == ChangeModify || change.Kind == ChangeAdd {
            file := filepath.Join(newDir, change.Path)
            fileInfo, err := os.Lstat(file)
            if err != nil {
                logrus.Errorf("Can not stat %q: %s", file, err)
                continue
            }

            if fileInfo != nil && !fileInfo.IsDir() {
                if hasHardlinks(fileInfo) {
                    inode := getIno(fileInfo)
                    if _, ok := sf[inode]; !ok {
                        size += fileInfo.Size()
                        sf[inode] = struct{}{}
                    }
                } else {
                    size += fileInfo.Size()
                }
            }
        }
    }
    return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
    reader, writer := io.Pipe()
    go func() {
        ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)

        // this buffer is needed for the duration of this piped stream
        defer pools.BufioWriter32KPool.Put(ta.Buffer)

        sort.Sort(changesByPath(changes))

        // In general we log errors here but ignore them because
        // during e.g. a diff operation the container can continue
        // mutating the filesystem and we can see transient errors
        // from this
        for _, change := range changes {
            if change.Kind == ChangeDelete {
                whiteOutDir := filepath.Dir(change.Path)
                whiteOutBase := filepath.Base(change.Path)
                whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
                timestamp := time.Now()
                hdr := &tar.Header{
                    Name:       whiteOut[1:],
                    Size:       0,
                    ModTime:    timestamp,
                    AccessTime: timestamp,
                    ChangeTime: timestamp,
                }
                if err := ta.TarWriter.WriteHeader(hdr); err != nil {
                    logrus.Debugf("Can't write whiteout header: %s", err)
                }
            } else {
                path := filepath.Join(dir, change.Path)
                if err := ta.addTarFile(path, change.Path[1:]); err != nil {
                    logrus.Debugf("Can't add file %s to tar: %s", path, err)
                }
            }
        }

        // Make sure to check the error on Close.
        if err := ta.TarWriter.Close(); err != nil {
            logrus.Debugf("Can't close layer: %s", err)
        }
        if err := writer.Close(); err != nil {
            logrus.Debugf("failed close Changes writer: %s", err)
        }
    }()
    return reader, nil
}
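The two exported entry points in this hunk compose naturally. A usage sketch, with /tmp/old and /tmp/new as placeholder snapshot paths and nil ID maps (no remapping):

package main

import (
    "io"
    "os"

    "github.com/docker/docker/pkg/archive"
)

func main() {
    // Diff two directory snapshots.
    changes, err := archive.ChangesDirs("/tmp/new", "/tmp/old")
    if err != nil {
        panic(err)
    }

    // Stream the diff as a layer tar; deletions become .wh.* whiteout entries.
    rc, err := archive.ExportChanges("/tmp/new", changes, nil, nil)
    if err != nil {
        panic(err)
    }
    defer rc.Close()
    io.Copy(os.Stdout, rc)
}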
@@ -0,0 +1,313 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "bytes"
    "fmt"
    "os"
    "path/filepath"
    "sort"
    "syscall"
    "unsafe"

    "github.com/docker/docker/pkg/system"
    "golang.org/x/sys/unix"
)

// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
    dir1  string
    dir2  string
    root1 *FileInfo
    root2 *FileInfo
}

// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generate a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
    w := &walker{
        dir1:  dir1,
        dir2:  dir2,
        root1: newRootFileInfo(),
        root2: newRootFileInfo(),
    }

    i1, err := os.Lstat(w.dir1)
    if err != nil {
        return nil, nil, err
    }
    i2, err := os.Lstat(w.dir2)
    if err != nil {
        return nil, nil, err
    }

    if err := w.walk("/", i1, i2); err != nil {
        return nil, nil, err
    }

    return w.root1, w.root2, nil
}

// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
    if fi == nil {
        return nil
    }
    parent := root.LookUp(filepath.Dir(path))
    if parent == nil {
        return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
    }
    info := &FileInfo{
        name:     filepath.Base(path),
        children: make(map[string]*FileInfo),
        parent:   parent,
    }
    cpath := filepath.Join(dir, path)
    stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
    if err != nil {
        return err
    }
    info.stat = stat
    info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
    parent.children[info.name] = info
    return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
    // Register these nodes with the return trees, unless we're still at the
    // (already-created) roots:
    if path != "/" {
        if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
            return err
        }
        if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
            return err
        }
    }

    is1Dir := i1 != nil && i1.IsDir()
    is2Dir := i2 != nil && i2.IsDir()

    sameDevice := false
    if i1 != nil && i2 != nil {
        si1 := i1.Sys().(*syscall.Stat_t)
        si2 := i2.Sys().(*syscall.Stat_t)
        if si1.Dev == si2.Dev {
            sameDevice = true
        }
    }

    // If these files are both non-existent, or leaves (non-dirs), we are done.
    if !is1Dir && !is2Dir {
        return nil
    }

    // Fetch the names of all the files contained in both directories being walked:
    var names1, names2 []nameIno
    if is1Dir {
        names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
        if err != nil {
            return err
        }
    }
    if is2Dir {
        names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
        if err != nil {
            return err
        }
    }

    // We have lists of the files contained in both parallel directories, sorted
    // in the same order. Walk them in parallel, generating a unique merged list
    // of all items present in either or both directories.
    var names []string
    ix1 := 0
    ix2 := 0

    for {
        if ix1 >= len(names1) {
            break
        }
        if ix2 >= len(names2) {
            break
        }

        ni1 := names1[ix1]
        ni2 := names2[ix2]

        switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
        case -1: // ni1 < ni2 -- advance ni1
            // we will not encounter ni1 in names2
            names = append(names, ni1.name)
            ix1++
        case 0: // ni1 == ni2
            if ni1.ino != ni2.ino || !sameDevice {
                names = append(names, ni1.name)
            }
            ix1++
            ix2++
        case 1: // ni1 > ni2 -- advance ni2
            // we will not encounter ni2 in names1
            names = append(names, ni2.name)
            ix2++
        }
    }
    for ix1 < len(names1) {
        names = append(names, names1[ix1].name)
        ix1++
    }
    for ix2 < len(names2) {
        names = append(names, names2[ix2].name)
        ix2++
    }

    // For each of the names present in either or both of the directories being
    // iterated, stat the name under each root, and recurse the pair of them:
    for _, name := range names {
        fname := filepath.Join(path, name)
        var cInfo1, cInfo2 os.FileInfo
        if is1Dir {
            cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
            if err != nil && !os.IsNotExist(err) {
                return err
            }
        }
        if is2Dir {
            cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
            if err != nil && !os.IsNotExist(err) {
                return err
            }
        }
        if err = w.walk(fname, cInfo1, cInfo2); err != nil {
            return err
        }
    }
    return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
    name string
    ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
    var (
        size = 100
        buf  = make([]byte, 4096)
        nbuf int
        bufp int
        nb   int
    )

    f, err := os.Open(dirname)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    names = make([]nameIno, 0, size) // Empty with room to grow.
    for {
        // Refill the buffer if necessary
        if bufp >= nbuf {
            bufp = 0
            nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
            if nbuf < 0 {
                nbuf = 0
            }
            if err != nil {
                return nil, os.NewSyscallError("readdirent", err)
            }
            if nbuf <= 0 {
                break // EOF
            }
        }

        // Drain the buffer
        nb, names = parseDirent(buf[bufp:nbuf], names)
        bufp += nb
    }

    sl := nameInoSlice(names)
    sort.Sort(sl)
    return sl, nil
}

// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
    origlen := len(buf)
    for len(buf) > 0 {
        dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
        buf = buf[dirent.Reclen:]
        if dirent.Ino == 0 { // File absent in directory.
            continue
        }
        bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
        var name = string(bytes[0:clen(bytes[:])])
        if name == "." || name == ".." { // Useless names
            continue
        }
        names = append(names, nameIno{name, dirent.Ino})
    }
    return origlen - len(buf), names
}

func clen(n []byte) int {
    for i := 0; i < len(n); i++ {
        if n[i] == 0 {
            return i
        }
    }
    return len(n)
}

// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
    return changes(layers, rw, overlayDeletedFile, nil)
}

func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
    if fi.Mode()&os.ModeCharDevice != 0 {
        s := fi.Sys().(*syscall.Stat_t)
        if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
            return path, nil
        }
    }
    if fi.Mode()&os.ModeDir != 0 {
        opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
        if err != nil {
            return "", err
        }
        if len(opaque) == 1 && opaque[0] == 'y' {
            return path, nil
        }
    }

    return "", nil
}
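The heart of walker.walk above is a two-pointer union of two sorted name lists. A standalone sketch of that merge (without the inode-based pruning the real code applies in the equal case):

package main

import "fmt"

// mergeSorted walks two sorted slices in parallel and emits each name once,
// the same shape as the bytes.Compare switch in walker.walk.
func mergeSorted(a, b []string) []string {
    var out []string
    i, j := 0, 0
    for i < len(a) && j < len(b) {
        switch {
        case a[i] < b[j]: // will not see a[i] in b
            out = append(out, a[i])
            i++
        case a[i] > b[j]: // will not see b[j] in a
            out = append(out, b[j])
            j++
        default: // present in both
            out = append(out, a[i])
            i++
            j++
        }
    }
    out = append(out, a[i:]...)
    return append(out, b[j:]...)
}

func main() {
    fmt.Println(mergeSorted([]string{"a", "c", "d"}, []string{"b", "c", "e"}))
    // [a b c d e]
}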
@@ -0,0 +1,97 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "fmt"
    "os"
    "path/filepath"
    "runtime"
    "strings"

    "github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
    var (
        oldRoot, newRoot *FileInfo
        err1, err2       error
        errs             = make(chan error, 2)
    )
    go func() {
        oldRoot, err1 = collectFileInfo(oldDir)
        errs <- err1
    }()
    go func() {
        newRoot, err2 = collectFileInfo(newDir)
        errs <- err2
    }()

    // block until both routines have returned
    for i := 0; i < 2; i++ {
        if err := <-errs; err != nil {
            return nil, nil, err
        }
    }

    return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
    root := newRootFileInfo()

    err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        // Rebase path
        relPath, err := filepath.Rel(sourceDir, path)
        if err != nil {
            return err
        }

        // As this runs on the daemon side, file paths are OS specific.
        relPath = filepath.Join(string(os.PathSeparator), relPath)

        // See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
        // Temporary workaround. If the returned path starts with two backslashes,
        // trim it down to a single backslash. Only relevant on Windows.
        if runtime.GOOS == "windows" {
            if strings.HasPrefix(relPath, `\\`) {
                relPath = relPath[1:]
            }
        }

        if relPath == string(os.PathSeparator) {
            return nil
        }

        parent := root.LookUp(filepath.Dir(relPath))
        if parent == nil {
            return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
        }

        info := &FileInfo{
            name:     filepath.Base(relPath),
            children: make(map[string]*FileInfo),
            parent:   parent,
        }

        s, err := system.Lstat(path)
        if err != nil {
            return err
        }
        info.stat = s

        info.capability, _ = system.Lgetxattr(path, "security.capability")

        parent.children[info.name] = info

        return nil
    })
    if err != nil {
        return nil, err
    }
    return root, nil
}
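collectFileInfoForChanges above uses a small fan-out/fan-in idiom: two goroutines, a buffered error channel, fail on the first error. The same shape in isolation (the collector and paths are stand-ins, not part of this diff):

package main

import "fmt"

// runBoth executes collect on two inputs concurrently and blocks until both
// goroutines have sent their result, returning the first error seen.
func runBoth(collect func(string) (int, error), a, b string) (int, int, error) {
    var ra, rb int
    var ea, eb error
    errs := make(chan error, 2)
    go func() { ra, ea = collect(a); errs <- ea }()
    go func() { rb, eb = collect(b); errs <- eb }()
    for i := 0; i < 2; i++ {
        if err := <-errs; err != nil {
            return 0, 0, err
        }
    }
    return ra, rb, nil
}

func main() {
    count := func(dir string) (int, error) { return len(dir), nil } // stand-in collector
    a, b, _ := runBoth(count, "/old", "/new")
    fmt.Println(a, b) // 4 4
}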
@@ -0,0 +1,37 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "os"
    "syscall"

    "github.com/docker/docker/pkg/system"
    "golang.org/x/sys/unix"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
    if oldStat.Mode() != newStat.Mode() ||
        oldStat.UID() != newStat.UID() ||
        oldStat.GID() != newStat.GID() ||
        oldStat.Rdev() != newStat.Rdev() ||
        // Don't look at size for dirs, it's not a good measure of change
        (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
            (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
        return true
    }
    return false
}

func (info *FileInfo) isDir() bool {
    return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
    return fi.Sys().(*syscall.Stat_t).Ino
}

func hasHardlinks(fi os.FileInfo) bool {
    return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
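statDifferent above leans on sameFsTimeSpec (defined in changes.go earlier in this diff) to tolerate the sub-second precision that tar discards. The rule in isolation, with sameFsTime copied from changes.go and a demo main:

package main

import (
    "fmt"
    "time"
)

// sameFsTime: times match exactly, or they share a second and one side has
// zero nanoseconds (meaning sub-second precision was dropped somewhere).
func sameFsTime(a, b time.Time) bool {
    return a == b ||
        (a.Unix() == b.Unix() &&
            (a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
    exact := time.Date(2018, 4, 17, 10, 0, 0, 123456789, time.UTC)
    stored := exact.Truncate(time.Second) // what a tar round-trip keeps
    fmt.Println(sameFsTime(exact, stored))                  // true
    fmt.Println(sameFsTime(exact, stored.Add(time.Second))) // false
}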
@@ -0,0 +1,30 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "os"

    "github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
    // Don't look at size for dirs, it's not a good measure of change
    if oldStat.Mtim() != newStat.Mtim() ||
        oldStat.Mode() != newStat.Mode() ||
        oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
        return true
    }
    return false
}

func (info *FileInfo) isDir() bool {
    return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
    return
}

func hasHardlinks(fi os.FileInfo) bool {
    return false
}
@@ -0,0 +1,472 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "errors"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/docker/pkg/system"
    "github.com/sirupsen/logrus"
)

// Errors used or returned by this file.
var (
    ErrNotDirectory      = errors.New("not a directory")
    ErrDirNotExists      = errors.New("no such directory")
    ErrCannotCopyDir     = errors.New("cannot copy directory")
    ErrInvalidCopySource = errors.New("invalid copy source content")
)

// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
    // Ensure paths are in platform semantics
    cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
    originalPath = strings.Replace(originalPath, "/", string(sep), -1)

    if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
        if !hasTrailingPathSeparator(cleanedPath, sep) {
            // Add a separator if it doesn't already end with one (a cleaned
            // path would only end in a separator if it is the root).
            cleanedPath += string(sep)
        }
        cleanedPath += "."
    }

    if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
        cleanedPath += string(sep)
    }

    return cleanedPath
}

// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
    return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}

// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
    return len(path) > 0 && path[len(path)-1] == sep
}

// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
    return filepath.Base(path) == "."
}

// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
    cleanedPath := filepath.Clean(filepath.FromSlash(path))

    if specifiesCurrentDir(path) {
        cleanedPath += string(os.PathSeparator) + "."
    }

    return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
    return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}

// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
    sourcePath = normalizePath(sourcePath)
    if _, err = os.Lstat(sourcePath); err != nil {
        // Catches the case where the source does not exist or is not a
        // directory if asserted to be a directory, as this also causes an
        // error.
        return
    }

    // Separate the source path between its directory and
    // the entry in that directory which we are archiving.
    sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
    opts := TarResourceRebaseOpts(sourceBase, rebaseName)

    logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
    return TarWithOptions(sourceDir, opts)
}

// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
    filter := []string{sourceBase}
    return &TarOptions{
        Compression:      Uncompressed,
        IncludeFiles:     filter,
        IncludeSourceDir: true,
        RebaseNames: map[string]string{
            sourceBase: rebaseName,
        },
    }
}

// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
    Path       string
    Exists     bool
    IsDir      bool
    RebaseName string
}

// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
    // normalize the file path and then evaluate the symbolic link;
    // we will use the target file instead of the symbolic link if
    // followLink is set
    path = normalizePath(path)

    resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
    if err != nil {
        return CopyInfo{}, err
    }

    stat, err := os.Lstat(resolvedPath)
    if err != nil {
        return CopyInfo{}, err
    }

    return CopyInfo{
        Path:       resolvedPath,
        Exists:     true,
        IsDir:      stat.IsDir(),
        RebaseName: rebaseName,
    }, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
    maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
    path = normalizePath(path)
    originalPath := path

    stat, err := os.Lstat(path)

    if err == nil && stat.Mode()&os.ModeSymlink == 0 {
        // The path exists and is not a symlink.
        return CopyInfo{
            Path:   path,
            Exists: true,
            IsDir:  stat.IsDir(),
        }, nil
    }

    // While the path is a symlink.
    for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
        if n > maxSymlinkIter {
            // Don't follow symlinks more than this arbitrary number of times.
            return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
        }

        // The path is a symbolic link. We need to evaluate it so that the
        // destination of the copy operation is the link target and not the
        // link itself. This is notably different than CopyInfoSourcePath which
        // only evaluates symlinks before the last appearing path separator.
        // Also note that it is okay if the last path element is a broken
        // symlink as the copy operation should create the target.
        var linkTarget string

        linkTarget, err = os.Readlink(path)
        if err != nil {
            return CopyInfo{}, err
        }

        if !system.IsAbs(linkTarget) {
            // Join with the parent directory.
            dstParent, _ := SplitPathDirEntry(path)
            linkTarget = filepath.Join(dstParent, linkTarget)
        }

        path = linkTarget
        stat, err = os.Lstat(path)
    }

    if err != nil {
        // It's okay if the destination path doesn't exist. We can still
        // continue the copy operation if the parent directory exists.
        if !os.IsNotExist(err) {
            return CopyInfo{}, err
        }

        // Ensure destination parent dir exists.
        dstParent, _ := SplitPathDirEntry(path)

        parentDirStat, err := os.Stat(dstParent)
        if err != nil {
            return CopyInfo{}, err
        }
        if !parentDirStat.IsDir() {
            return CopyInfo{}, ErrNotDirectory
        }

        return CopyInfo{Path: path}, nil
    }

    // The path exists after resolving symlinks.
    return CopyInfo{
        Path:   path,
        Exists: true,
        IsDir:  stat.IsDir(),
    }, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
    // Ensure in platform semantics
    srcInfo.Path = normalizePath(srcInfo.Path)
    dstInfo.Path = normalizePath(dstInfo.Path)

    // Separate the destination path between its directory and base
    // components in case the source archive contents need to be rebased.
    dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
    _, srcBase := SplitPathDirEntry(srcInfo.Path)

    switch {
    case dstInfo.Exists && dstInfo.IsDir:
        // The destination exists as a directory. No alteration
        // to srcContent is needed as its contents can be
        // simply extracted to the destination directory.
        return dstInfo.Path, ioutil.NopCloser(srcContent), nil
    case dstInfo.Exists && srcInfo.IsDir:
        // The destination exists as some type of file and the source
        // content is a directory. This is an error condition since
        // you cannot copy a directory to an existing file location.
        return "", nil, ErrCannotCopyDir
    case dstInfo.Exists:
        // The destination exists as some type of file and the source content
        // is also a file. The source content entry will have to be renamed to
        // have a basename which matches the destination path's basename.
        if len(srcInfo.RebaseName) != 0 {
            srcBase = srcInfo.RebaseName
        }
        return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
    case srcInfo.IsDir:
        // The destination does not exist and the source content is an archive
        // of a directory. The archive should be extracted to the parent of
        // the destination path instead, and when it is, the directory that is
        // created as a result should take the name of the destination path.
        // The source content entries will have to be renamed to have a
        // basename which matches the destination path's basename.
        if len(srcInfo.RebaseName) != 0 {
            srcBase = srcInfo.RebaseName
        }
        return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
    case assertsDirectory(dstInfo.Path, os.PathSeparator):
        // The destination does not exist and is asserted to be created as a
        // directory, but the source content is not a directory. This is an
        // error condition since you cannot create a directory from a file
        // source.
        return "", nil, ErrDirNotExists
    default:
        // The last remaining case is when the destination does not exist, is
        // not asserted to be a directory, and the source content is not an
        // archive of a directory. In this case, the destination file will need
        // to be created when the archive is extracted and the source content
        // entry will have to be renamed to have a basename which matches the
        // destination path's basename.
        if len(srcInfo.RebaseName) != 0 {
            srcBase = srcInfo.RebaseName
        }
        return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
    }
}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
    if oldBase == string(os.PathSeparator) {
        // If oldBase specifies the root directory, use an empty string as
        // oldBase instead so that newBase doesn't replace the path separator
        // that all paths will start with.
        oldBase = ""
    }

    rebased, w := io.Pipe()

    go func() {
        srcTar := tar.NewReader(srcContent)
        rebasedTar := tar.NewWriter(w)

        for {
            hdr, err := srcTar.Next()
            if err == io.EOF {
                // Signals end of archive.
                rebasedTar.Close()
                w.Close()
                return
            }
            if err != nil {
                w.CloseWithError(err)
                return
            }

            hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
            if hdr.Typeflag == tar.TypeLink {
                hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
            }

            if err = rebasedTar.WriteHeader(hdr); err != nil {
                w.CloseWithError(err)
                return
            }

            if _, err = io.Copy(rebasedTar, srcTar); err != nil {
                w.CloseWithError(err)
                return
            }
        }
    }()

    return rebased
}

// TODO @gupta-ak. These might have to be changed in the future to be
// continuity driver aware as well to support LCOW.

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
    var (
        srcInfo CopyInfo
        err     error
    )

    // Ensure in platform semantics
    srcPath = normalizePath(srcPath)
    dstPath = normalizePath(dstPath)

    // Clean the source and destination paths.
    srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
    dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)

    if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
        return err
    }

    content, err := TarResource(srcInfo)
    if err != nil {
        return err
    }
    defer content.Close()

    return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
    // The destination path need not exist, but CopyInfoDestinationPath will
    // ensure that at least the parent directory exists.
    dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
    if err != nil {
        return err
    }

    dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
    if err != nil {
        return err
    }
    defer copyArchive.Close()

    options := &TarOptions{
        NoLchown:             true,
        NoOverwriteDirNonDir: true,
    }

    return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides which real path needs to be copied, based on
// parameters such as whether to follow a symbolic link. If followLink is true,
// resolvedPath will be the link target of any symbolic link file; otherwise
// only symlinks in the directory portion of the path are resolved, and the
// symbolic link file itself is returned without resolving it.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
    if followLink {
        resolvedPath, err = filepath.EvalSymlinks(path)
        if err != nil {
            return
        }

        resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
    } else {
        dirPath, basePath := filepath.Split(path)

        // if not following symbolic links, then resolve symlinks in the parent dir
        var resolvedDirPath string
        resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
        if err != nil {
            return
        }
        // resolvedDirPath will have been cleaned (no trailing path separators) so
        // we can manually join it with the base path element.
        resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
        if hasTrailingPathSeparator(path, os.PathSeparator) &&
            filepath.Base(path) != filepath.Base(resolvedPath) {
            rebaseName = filepath.Base(path)
        }
    }
    return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath, returning the
// completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
    // linkTarget will have been cleaned (no trailing path separators and dot) so
    // we can manually join it with them
    var rebaseName string
    if specifiesCurrentDir(path) &&
        !specifiesCurrentDir(resolvedPath) {
        resolvedPath += string(filepath.Separator) + "."
    }

    if hasTrailingPathSeparator(path, os.PathSeparator) &&
        !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
        resolvedPath += string(filepath.Separator)
    }

    if filepath.Base(path) != filepath.Base(resolvedPath) {
        // In the case where the path had a trailing separator and a symlink
        // evaluation has changed the last path component, we will need to
        // rebase the name in the archive that is being copied to match the
        // originally requested name.
        rebaseName = filepath.Base(path)
    }
    return resolvedPath, rebaseName
}
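The trailing-`/.` convention this hunk preserves is what distinguishes "copy the directory" from "copy its contents". A sketch with placeholder paths (/tmp/src and /tmp/dst are assumptions for the demo):

package main

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/archive"
)

func main() {
    // filepath.Clean strips the trailing "/.", so it is re-attached here.
    orig := "/tmp/src/."
    cleaned := filepath.Clean(orig) // "/tmp/src"
    fmt.Println(archive.PreserveTrailingDotOrSeparator(cleaned, orig, os.PathSeparator))
    // "/tmp/src/."

    // Copy the *contents* of /tmp/src into /tmp/dst without following a
    // final symlink, if any.
    if err := archive.CopyResource("/tmp/src/.", "/tmp/dst", false); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}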
@@ -0,0 +1,11 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "path/filepath"
)

func normalizePath(path string) string {
    return filepath.ToSlash(path)
}
@@ -0,0 +1,9 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "path/filepath"
)

func normalizePath(path string) string {
    return filepath.FromSlash(path)
}
@ -0,0 +1,256 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
||||
// compressed or uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
|
||||
tr := tar.NewReader(layer)
|
||||
trBuf := pools.BufioReader32KPool.Get(tr)
|
||||
defer pools.BufioReader32KPool.Put(trBuf)
|
||||
|
||||
var dirs []*tar.Header
|
||||
unpackedPaths := make(map[string]struct{})
|
||||
|
||||
if options == nil {
|
||||
options = &TarOptions{}
|
||||
}
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||
|
||||
aufsTempdir := ""
|
||||
aufsHardlinks := make(map[string]*tar.Header)
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
size += hdr.Size
|
||||
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
// Windows does not support filenames with colons in them. Ignore
|
||||
// these files. This is not a problem though (although it might
|
||||
// appear that it is). Let's suppose a client is running docker pull.
|
||||
// The daemon it points to is Windows. Would it make sense for the
|
||||
// client to be doing a docker pull Ubuntu for example (which has files
|
||||
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
||||
// not as it would really only make sense that they were pulling a
|
||||
// Windows image. However, for development, it is necessary to be able
|
||||
// to pull Linux images which are in the repository.
|
||||
//
|
||||
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||
// specific or Linux-specific, this warning should be changed to an error
|
||||
// to cater for the situation where someone does manage to upload a Linux
|
||||
// image but have it tagged as Windows inadvertently.
|
||||
if runtime.GOOS == "windows" {
|
||||
if strings.Contains(hdr.Name, ":") {
|
||||
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Note as these operations are platform specific, so must the slash be.
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
// Not the root directory, ensure that the parent directory exists.
|
||||
// This happened in some tests where an image had a tarfile without any
|
||||
// parent directories.
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = system.MkdirAll(parentPath, 0600, "")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skip AUFS metadata dirs
|
||||
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
|
||||
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
|
||||
// We don't want this directory, but we need the files in them so that
|
||||
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note that these operations are platform specific, so the slash must be too.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := remapIDs(idMappings, srcHdr); err != nil {
				return 0, err
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	if err != nil {
		return 0, err
	}
	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform

	if decompress {
		layer, err = DecompressStream(layer)
		if err != nil {
			return 0, err
		}
	}
	return UnpackLayer(dest, layer, options)
}
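For orientation, a minimal sketch of how a caller might drive ApplyLayer; the layer file and destination directory are illustrative only, and per the comment above the stream may be compressed or uncompressed:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// "layer.tar.gz" is a hypothetical layer diff on disk.
	f, err := os.Open("layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Apply the layer on top of an existing root filesystem directory.
	size, err := archive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied %d bytes\n", size)
}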
@@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/docker/docker/pkg/archive"
	"github.com/sirupsen/logrus"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		// Assign to the outer oldDir; a `:=` here would shadow it and
		// leave the outer variable empty for the ChangesDirs call below.
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes\n", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
@@ -0,0 +1,16 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
@@ -0,0 +1,16 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
@@ -0,0 +1,23 @@
package archive // import "github.com/docker/docker/pkg/archive"

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
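A short sketch of how these constants compose in practice; the entry name ".wh.passwd" is illustrative, standing in for a file deleted in an upper layer:

package main

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// A whiteout entry marks a deletion: ".wh." + the original name.
	name := ".wh.passwd" // hypothetical layer entry
	base := filepath.Base(name)
	if strings.HasPrefix(base, archive.WhiteoutPrefix) {
		fmt.Println("deletes:", base[len(archive.WhiteoutPrefix):]) // "passwd"
	}
	// Meta-prefixed names are bookkeeping, not deletions:
	fmt.Println(archive.WhiteoutOpaqueDir) // ".wh..wh..opq"
	fmt.Println(archive.WhiteoutLinkDir)   // ".wh..wh.plnk"
}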
@@ -0,0 +1,59 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bytes"
	"io"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
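A minimal usage sketch, following the call shown in the doc comment above:

package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two files: foo.txt with content, emptyfile with none.
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	tarBytes, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated %d-byte tar stream", len(tarBytes))
}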
@@ -0,0 +1,298 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/sirupsen/logrus"
)

// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
	patterns   []*Pattern
	exclusions bool
}

// NewPatternMatcher creates a new matcher object for specific patterns that can
// be used later to match paths against the patterns
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
	pm := &PatternMatcher{
		patterns: make([]*Pattern, 0, len(patterns)),
	}
	for _, p := range patterns {
		// Eliminate leading and trailing whitespace.
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		p = filepath.Clean(p)
		newp := &Pattern{}
		if p[0] == '!' {
			if len(p) == 1 {
				return nil, errors.New("illegal exclusion pattern: \"!\"")
			}
			newp.exclusion = true
			p = p[1:]
			pm.exclusions = true
		}
		// Do some syntax checking on the pattern.
		// filepath's Match() has some really weird rules that are inconsistent
		// so instead of trying to dup their logic, just call Match() for its
		// error state and if there is an error in the pattern return it.
		// If this becomes an issue we can remove this since it's really only
		// needed in the error (syntax) case - which isn't really critical.
		if _, err := filepath.Match(p, "."); err != nil {
			return nil, err
		}
		newp.cleanedPattern = p
		newp.dirs = strings.Split(p, string(os.PathSeparator))
		pm.patterns = append(pm.patterns, newp)
	}
	return pm, nil
}

// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently
func (pm *PatternMatcher) Matches(file string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for _, pattern := range pm.patterns {
		negative := false

		if pattern.exclusion {
			negative = true
		}

		match, err := pattern.match(file)
		if err != nil {
			return false, err
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(pattern.dirs) <= len(parentPathDirs) {
				match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}

// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
	return pm.exclusions
}

// Patterns returns array of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
	return pm.patterns
}

// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
	cleanedPattern string
	dirs           []string
	regexp         *regexp.Regexp
	exclusion      bool
}

func (p *Pattern) String() string {
	return p.cleanedPattern
}

// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
	return p.exclusion
}

func (p *Pattern) match(path string) (bool, error) {
	if p.regexp == nil {
		if err := p.compile(); err != nil {
			return false, filepath.ErrBadPattern
		}
	}

	b := p.regexp.MatchString(path)

	return b, nil
}

func (p *Pattern) compile() error {
	regStr := "^"
	pattern := p.cleanedPattern
	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					// Note that this allows for any # of /'s (even 0) because
					// the .* will eat everything, even /'s
					regStr += "(.*" + escSL + ")?"
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if ch == '.' || ch == '$' {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	re, err := regexp.Compile(regStr)
	if err != nil {
		return err
	}

	p.regexp = re
	return nil
}

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	pm, err := NewPatternMatcher(patterns)
	if err != nil {
		return false, err
	}
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	return pm.Matches(file)
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
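A minimal sketch of the exclusion semantics via the package-level Matches helper; the .dockerignore-style patterns and file names are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude all .go files, then re-include main.go with a "!" pattern.
	patterns := []string{"*.go", "!main.go"}

	for _, f := range []string{"main.go", "util.go", "README.md"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		// main.go: false, util.go: true, README.md: false
		fmt.Printf("%s excluded: %v\n", f, excluded)
	}
}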
@@ -0,0 +1,27 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"os"
	"os/exec"
	"strconv"
	"strings"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
	pid := os.Getpid()

	cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))

	output, err := cmd.CombinedOutput()
	if err != nil {
		return -1
	}

	outputStr := strings.TrimSpace(string(output))

	fds := strings.Split(outputStr, "\n")

	return len(fds) - 1
}
@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/sirupsen/logrus"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
@@ -0,0 +1,7 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

// GetTotalUsedFds returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
@@ -0,0 +1,51 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n

	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
@@ -0,0 +1,186 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"errors"
	"io"
	"sync"
)

// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6

// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64

// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6

var (
	// ErrClosed is returned when Write is called on a closed BytesPipe.
	ErrClosed = errors.New("write to closed BytesPipe")

	bufPools     = make(map[int]*sync.Pool)
	bufPoolsLock sync.Mutex
)

// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
	mu       sync.Mutex
	wait     *sync.Cond
	buf      []*fixedBuffer
	bufLen   int
	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates new BytesPipe, initialized with a slice whose
// capacity is minCap (64 bytes) and whose length is 0.
func NewBytesPipe() *BytesPipe {
	bp := &BytesPipe{}
	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()

	written := 0
loop0:
	for {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return written, ErrClosed
		}

		if len(bp.buf) == 0 {
			bp.buf = append(bp.buf, getBuffer(64))
		}
		// get the last buffer
		b := bp.buf[len(bp.buf)-1]

		n, err := b.Write(p)
		written += n
		bp.bufLen += n

		// errBufferFull is an error we expect to get if the buffer is full
		if err != nil && err != errBufferFull {
			bp.wait.Broadcast()
			bp.mu.Unlock()
			return written, err
		}

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
			if bp.closeErr != nil {
				continue loop0
			}
		}

		// add new byte slice to the buffers slice and continue writing
		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
	bp.mu.Lock()
	if err != nil {
		bp.closeErr = err
	} else {
		bp.closeErr = io.EOF
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	if bp.bufLen == 0 {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
		bp.wait.Wait()
		if bp.bufLen == 0 && bp.closeErr != nil {
			err := bp.closeErr
			bp.mu.Unlock()
			return 0, err
		}
	}

	for bp.bufLen > 0 {
		b := bp.buf[0]
		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
		bp.bufLen -= read

		if b.Len() == 0 {
			// it's empty so return it to the pool and move to the next one
			returnBuffer(b)
			bp.buf[0] = nil
			bp.buf = bp.buf[1:]
		}

		if len(p) == read {
			break
		}

		p = p[read:]
	}

	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

func returnBuffer(b *fixedBuffer) {
	b.Reset()
	bufPoolsLock.Lock()
	pool := bufPools[b.Cap()]
	bufPoolsLock.Unlock()
	if pool != nil {
		pool.Put(b)
	}
}

func getBuffer(size int) *fixedBuffer {
	bufPoolsLock.Lock()
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		bufPools[size] = pool
	}
	bufPoolsLock.Unlock()
	return pool.Get().(*fixedBuffer)
}
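A minimal producer/consumer sketch of BytesPipe; closing the pipe from the writer side is what lets the reader terminate with io.EOF:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Producer: writes, then closes so the reader sees io.EOF.
	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close()
	}()

	// Consumer: each written byte can be read exactly once.
	data, err := ioutil.ReadAll(bp)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data)) // "hello world"
}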
@@ -0,0 +1,162 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
// temporary file, and closing it atomically renames the temporary file to the
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}

	abspath, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	return &atomicFileWriter{
		f:    f,
		fn:   abspath,
		perm: perm,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
		f.(*atomicFileWriter).writeErr = err
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type atomicFileWriter struct {
	f        *os.File
	fn       string
	writeErr error
	perm     os.FileMode
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil || w.writeErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(w.f.Name(), w.perm); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	return nil
}

// AtomicWriteSet is used to atomically write a set
// of files and ensure they are visible at the same time.
// Must be committed to a new directory.
type AtomicWriteSet struct {
	root string
}

// NewAtomicWriteSet creates a new atomic write set to
// atomically create a set of files. The given directory
// is used as the base directory for storing files before
// commit. If no temporary directory is given the system
// default is used.
func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) {
	td, err := ioutil.TempDir(tmpDir, "write-set-")
	if err != nil {
		return nil, err
	}

	return &AtomicWriteSet{
		root: td,
	}, nil
}

// WriteFile writes a file to the set, guaranteeing the file
// has been synced.
func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type syncFileCloser struct {
	*os.File
}

func (w syncFileCloser) Close() error {
	err := w.File.Sync()
	if err1 := w.File.Close(); err == nil {
		err = err1
	}
	return err
}

// FileWriter opens a file writer inside the set. The file
// should be synced and closed before calling commit.
func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) {
	f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm)
	if err != nil {
		return nil, err
	}
	return syncFileCloser{f}, nil
}

// Cancel cancels the set and removes all temporary data
// created in the set.
func (ws *AtomicWriteSet) Cancel() error {
	return os.RemoveAll(ws.root)
}

// Commit moves all created files to the target directory. The
// target directory must not exist and the parent of the target
// directory must exist.
func (ws *AtomicWriteSet) Commit(target string) error {
	return os.Rename(ws.root, target)
}

// String returns the location the set is writing to.
func (ws *AtomicWriteSet) String() string {
	return ws.root
}
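A minimal sketch of the single-file helper; the path and payload are illustrative:

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// The file appears at /tmp/config.json only after a successful
	// write + sync + rename; readers never observe a partial file.
	if err := ioutils.AtomicWriteFile("/tmp/config.json", []byte(`{"ok":true}`), 0644); err != nil {
		log.Fatal(err)
	}
}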
@@ -0,0 +1,158 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"crypto/sha256"
	"encoding/hex"
	"io"

	"golang.org/x/net/context"
)

// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser.
// It calls the given callback function when closed. It should be constructed
// with NewReadCloserWrapper
type ReadCloserWrapper struct {
	io.Reader
	closer func() error
}

// Close calls back the passed closer function
func (r *ReadCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &ReadCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function; the function will run
// at the end of file or when the file is closed.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

// Close closes the file and runs the function.
func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
	cancel func()
	pR     *io.PipeReader // Stream to read from
	pW     *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
	pR, pW := io.Pipe()

	// Create a context used to signal when the pipe is closed
	doneCtx, cancel := context.WithCancel(context.Background())

	p := &cancelReadCloser{
		cancel: cancel,
		pR:     pR,
		pW:     pW,
	}

	go func() {
		_, err := io.Copy(pW, in)
		select {
		case <-ctx.Done():
			// If the context was closed, p.closeWithError
			// was already called. Calling it again would
			// change the error that Read returns.
		default:
			p.closeWithError(err)
		}
		in.Close()
	}()
	go func() {
		for {
			select {
			case <-ctx.Done():
				p.closeWithError(ctx.Err())
			case <-doneCtx.Done():
				return
			}
		}
	}()

	return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
	return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
	p.pW.CloseWithError(err)
	p.cancel()
}

// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
	p.closeWithError(io.EOF)
	return nil
}
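A minimal sketch of the cancellable reader; note this vendored version takes a golang.org/x/net/context.Context, which on recent Go releases is a type alias for the standard library's context.Context, so the code below assumes that alias is in effect:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	in := ioutil.NopCloser(strings.NewReader("payload"))
	rc := ioutils.NewCancelReadCloser(ctx, in)
	defer rc.Close()

	// Cancelling the context unblocks any in-flight Read with ctx.Err();
	// here the payload is tiny, so the read normally wins the race.
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel()
	}()

	buf, err := ioutil.ReadAll(rc)
	fmt.Printf("read %q, err: %v\n", buf, err)
}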
@@ -0,0 +1,10 @@
// +build !windows

package ioutils // import "github.com/docker/docker/pkg/ioutils"

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
	return ioutil.TempDir(dir, prefix)
}
@@ -0,0 +1,16 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"io/ioutil"

	"github.com/docker/docker/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
	tempDir, err := ioutil.TempDir(dir, prefix)
	if err != nil {
		return "", err
	}
	return longpath.AddPrefix(tempDir), nil
}
@@ -0,0 +1,92 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import (
	"io"
	"sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
	w           io.Writer
	flusher     flusher
	flushed     chan struct{}
	flushedOnce sync.Once
	closed      chan struct{}
	closeLock   sync.Mutex
}

type flusher interface {
	Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
	select {
	case <-wf.closed:
		return 0, errWriteFlusherClosed
	default:
	}

	n, err = wf.w.Write(b)
	wf.Flush() // every write is a flush.
	return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
	select {
	case <-wf.closed:
		return
	default:
	}

	wf.flushedOnce.Do(func() {
		close(wf.flushed)
	})
	wf.flusher.Flush()
}

// Flushed returns true if the stream has been flushed at least once,
// and false otherwise.
func (wf *WriteFlusher) Flushed() bool {
	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
	// be used to detect whether or not a response code has been issued.
	// Another hook should be used instead.
	var flushed bool
	select {
	case <-wf.flushed:
		flushed = true
	default:
	}
	return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
	wf.closeLock.Lock()
	defer wf.closeLock.Unlock()

	select {
	case <-wf.closed:
		return errWriteFlusherClosed
	default:
		close(wf.closed)
	}
	return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
	var fl flusher
	if f, ok := w.(flusher); ok {
		fl = f
	} else {
		fl = &NopFlusher{}
	}
	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
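A minimal sketch of streaming through a WriteFlusher; the route and port are illustrative, and the standard library's HTTP response writer typically satisfies the Flush() interface checked by NewWriteFlusher, so each event line reaches the client immediately:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
		// Every Write below is immediately flushed to the client.
		wf := ioutils.NewWriteFlusher(w)
		defer wf.Close()

		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "event %d\n", i)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}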
@@ -0,0 +1,66 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"

import "io"

// NopWriter represents a type whose Write operation is a nop.
type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
	return len(buf), nil
}

type nopWriteCloser struct {
	io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
	return &nopWriteCloser{w}
}

// NopFlusher represents a type whose Flush operation is a nop.
type NopFlusher struct{}

// Flush is a nop operation.
func (f *NopFlusher) Flush() {}

type writeCloserWrapper struct {
	io.Writer
	closer func() error
}

func (r *writeCloserWrapper) Close() error {
	return r.closer()
}

// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
	return &writeCloserWrapper{
		Writer: r,
		closer: closer,
	}
}

// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return is masked
// (e.g., json.Encoder.Encode())
type WriteCounter struct {
	Count  int64
	Writer io.Writer
}

// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
	return &WriteCounter{
		Writer: w,
	}
}

func (wc *WriteCounter) Write(p []byte) (count int, err error) {
	count, err = wc.Writer.Write(p)
	wc.Count += int64(count)
	return
}
@@ -0,0 +1,26 @@
// Package longpath introduces some constants and helper functions for handling
// long paths in Windows, which are expected to be prepended with `\\?\` and
// followed by either a drive letter, a UNC server\share, or a volume identifier.
package longpath // import "github.com/docker/docker/pkg/longpath"

import (
	"strings"
)

// Prefix is the longpath prefix for Windows file paths.
const Prefix = `\\?\`

// AddPrefix will add the Windows long path prefix to the path provided if
// it does not already have it.
func AddPrefix(path string) string {
	if !strings.HasPrefix(path, Prefix) {
		if strings.HasPrefix(path, `\\`) {
			// This is a UNC path, so we need to add 'UNC' to the path as well.
			path = Prefix + `UNC` + path[1:]
		} else {
			path = Prefix + path
		}
	}
	return path
}
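The three cases AddPrefix handles, with illustrative paths:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\Windows\Temp`))      // \\?\C:\Windows\Temp
	fmt.Println(longpath.AddPrefix(`\\server\share\file`))  // \\?\UNC\server\share\file
	fmt.Println(longpath.AddPrefix(`\\?\C:\already\there`)) // unchanged
}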
@@ -0,0 +1,137 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools // import "github.com/docker/docker/pkg/pools"

import (
	"bufio"
	"io"
	"sync"

	"github.com/docker/docker/pkg/ioutils"
)

const buffer32K = 32 * 1024

var (
	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
	buffer32KPool      = newBufferPoolWithSize(buffer32K)
)

// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
	pool sync.Pool
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	return &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReaderSize(nil, size) },
		},
	}
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bufPool.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

type bufferPool struct {
	pool sync.Pool
}

func newBufferPoolWithSize(size int) *bufferPool {
	return &bufferPool{
		pool: sync.Pool{
			New: func() interface{} { return make([]byte, size) },
		},
	}
}

func (bp *bufferPool) Get() []byte {
	return bp.pool.Get().([]byte)
}

func (bp *bufferPool) Put(b []byte) {
	bp.pool.Put(b)
}

// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := buffer32KPool.Get()
	written, err = io.CopyBuffer(dst, src, buf)
	buffer32KPool.Put(buf)
	return
}

// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(r, func() error {
		if readCloser, ok := r.(io.ReadCloser); ok {
			readCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}

// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
	pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
	return &BufioWriterPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewWriterSize(nil, size) },
		},
	}
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	buf := bufPool.pool.Get().(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
	return ioutils.NewWriteCloserWrapper(w, func() error {
		buf.Flush()
		if writeCloser, ok := w.(io.WriteCloser); ok {
			writeCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}
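A minimal sketch of the pooled copy helper; the input file name is illustrative:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src, err := os.Open("input.bin") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// pools.Copy reuses a pooled 32K buffer instead of allocating one
	// per call, which matters on hot copy paths.
	n, err := pools.Copy(os.Stdout, src)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", n)
}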
vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored, new file, 2215 lines): file diff suppressed because it is too large.
@@ -0,0 +1,139 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package ptypes

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
//   var x ptypes.DynamicAny
//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
//   fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding
// message type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if the type of
// the contents of the Any message does not match the type of the pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = Empty(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
	aname, err := AnyMessageName(any)
	if err != nil {
		return false
	}

	return aname == proto.MessageName(pb)
}
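A minimal round-trip sketch of the API above; duration.Duration is used only because it is vendored alongside this package:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Wrap a message in an Any, then recover it.
	d := &duration.Duration{Seconds: 1, Nanos: 212000000}

	a, err := ptypes.MarshalAny(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	var out duration.Duration
	if err := ptypes.UnmarshalAny(a, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 1 212000000
}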
@@ -0,0 +1,178 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto

/*
Package any is a generated protocol buffer package.

It is generated from these files:
	google/protobuf/any.proto

It has these top-level messages:
	Any
*/
package any

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
// Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
//
// Example 4: Pack and unpack a message in Go
//
//     foo := &pb.Foo{...}
//     any, err := ptypes.MarshalAny(foo)
//     ...
//     foo := &pb.Foo{}
//     if err := ptypes.UnmarshalAny(any, foo); err != nil {
//       ...
//     }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
//     package google.profile;
//     message Person {
//       string first_name = 1;
//       string last_name = 2;
//     }
//
//     {
//       "@type": "type.googleapis.com/google.profile.Person",
//       "firstName": <string>,
//       "lastName": <string>
//     }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
//     {
//       "@type": "type.googleapis.com/google.protobuf.Duration",
//       "value": "1.212s"
//     }
//
type Any struct {
	// A URL/resource name whose content describes the type of the
	// serialized protocol buffer message.
	//
	// For URLs which use the scheme `http`, `https`, or no scheme, the
	// following restrictions and interpretations apply:
	//
	// * If no scheme is provided, `https` is assumed.
	// * The last segment of the URL's path must represent the fully
	//   qualified name of the type (as in `path/google.protobuf.Duration`).
	//   The name should be in a canonical form (e.g., leading "." is
	//   not accepted).
	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
	//   value in binary format, or produce an error.
	// * Applications are allowed to cache lookup results based on the
	//   URL, or have them precompiled into a binary to avoid any
	//   lookup. Therefore, binary compatibility needs to be preserved
	//   on changes to types. (Use versioned type names to manage
	//   breaking changes.)
	//
	// Schemes other than `http`, `https` (or the empty scheme) might be
	// used with implementation specific semantics.
	//
	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
	// Must be a valid serialized protocol buffer of the above specified type.
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

func (m *Any) Reset()                    { *m = Any{} }
func (m *Any) String() string            { return proto.CompactTextString(m) }
func (*Any) ProtoMessage()               {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string   { return "Any" }

func (m *Any) GetTypeUrl() string {
	if m != nil {
		return m.TypeUrl
	}
	return ""
}

func (m *Any) GetValue() []byte {
	if m != nil {
		return m.Value
	}
	return nil
}

func init() {
	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}

func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
	// 185 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue