Merge branch 'master' into credentials-pass

This commit is contained in:
Carlos Sanchez 2018-05-15 09:14:44 +02:00 committed by GitHub
commit 085b5c59bf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2188 changed files with 365333 additions and 99791 deletions

View File

@ -2,7 +2,7 @@ language: go
os: linux
go:
- 1.9.x
- 1.10.x
go_import_path: github.com/GoogleContainerTools/kaniko
script:

642
Gopkg.lock generated
View File

@ -12,8 +12,17 @@
"internal/version",
"storage"
]
revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733"
version = "v0.21.0"
revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811"
version = "v0.22.0"
[[projects]]
branch = "master"
name = "github.com/Azure/go-ansiterm"
packages = [
".",
"winterm"
]
revision = "d6e3b3328b783f23731bc4d058875b0371ff8109"
[[projects]]
name = "github.com/BurntSushi/toml"
@ -21,32 +30,35 @@
revision = "b26d9c308763d68093482582cea63d69be07a0f0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "github.com/GoogleContainerTools/container-diff"
packages = [
"cmd/util/output",
"pkg/image",
"pkg/util"
]
revision = "564361979f3124dd3e6d8bbec027279539c8e08b"
source = "github.com/GoogleContainerTools/container-diff"
[[projects]]
name = "github.com/Microsoft/go-winio"
packages = [
".",
"archive/tar",
"backuptar"
]
packages = ["."]
revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f"
version = "v0.4.7"
[[projects]]
name = "github.com/Microsoft/hcsshim"
packages = ["."]
revision = "45ef15484298b76abeb9513ea0ea0abd2b5b84b3"
version = "v0.6.8"
revision = "800683ae704ac360b2f3f47fa88f3a6c8c9091b5"
version = "v0.6.11"
[[projects]]
branch = "master"
name = "github.com/Nvveen/Gotty"
packages = ["."]
revision = "cd527374f1e5bff4938207604a14f2e38a9cf512"
[[projects]]
branch = "master"
name = "github.com/armon/go-metrics"
packages = ["."]
revision = "783273d703149aaeb9897cf58613d5af48861c25"
[[projects]]
branch = "master"
name = "github.com/armon/go-radix"
packages = ["."]
revision = "1fca145dffbcaa8fe914309b1ec0cfc67500fe61"
[[projects]]
branch = "master"
@ -54,101 +66,71 @@
packages = ["quantile"]
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
name = "github.com/boltdb/bolt"
packages = ["."]
revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8"
version = "v1.3.1"
[[projects]]
name = "github.com/containerd/containerd"
packages = [
"cio",
"defaults",
"log"
]
revision = "209a7fc3e4a32ef71a8c7b50c68fc8398415badf"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/containerd/continuity"
packages = ["pathdriver"]
packages = [
"devices",
"driver",
"fs",
"pathdriver",
"sysx"
]
revision = "c6cef34830231743494fe2969284df7b82cc0ad0"
[[projects]]
name = "github.com/containers/image"
packages = [
"copy",
"directory",
"directory/explicitfilepath",
"docker",
"docker/archive",
"docker/daemon",
"docker/policyconfiguration",
"docker/reference",
"docker/tarfile",
"image",
"internal/tmpdir",
"manifest",
"oci/archive",
"oci/internal",
"oci/layout",
"openshift",
"ostree",
"pkg/compression",
"pkg/docker/config",
"pkg/strslice",
"pkg/tlsclientconfig",
"signature",
"storage",
"tarball",
"transports",
"transports/alltransports",
"types",
"version"
]
revision = "495da41bd26c50be62fa07ae903ea2ee54c00283"
branch = "master"
name = "github.com/containerd/fifo"
packages = ["."]
revision = "3d5202aec260678c48179c56f40e6f38a095738c"
[[projects]]
name = "github.com/containers/storage"
name = "github.com/coreos/etcd"
packages = [
".",
"drivers",
"drivers/aufs",
"drivers/btrfs",
"drivers/devmapper",
"drivers/overlay",
"drivers/overlayutils",
"drivers/quota",
"drivers/register",
"drivers/vfs",
"drivers/windows",
"drivers/zfs",
"pkg/archive",
"pkg/chrootarchive",
"pkg/devicemapper",
"pkg/directory",
"pkg/dmesg",
"pkg/fileutils",
"pkg/fsutils",
"pkg/homedir",
"pkg/idtools",
"pkg/ioutils",
"pkg/locker",
"pkg/longpath",
"pkg/loopback",
"pkg/mount",
"pkg/parsers",
"pkg/parsers/kernel",
"pkg/pools",
"pkg/promise",
"pkg/reexec",
"pkg/stringid",
"pkg/system",
"pkg/truncindex"
"client",
"pkg/pathutil",
"pkg/srv",
"pkg/types",
"raft/raftpb",
"version"
]
revision = "1e5ce40cdb84ab66e26186435b1273e04b879fef"
source = "github.com/containers/storage"
revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
version = "v3.3.5"
[[projects]]
name = "github.com/coreos/go-semver"
packages = ["semver"]
revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
version = "v0.2.0"
[[projects]]
name = "github.com/deckarep/golang-set"
packages = ["."]
revision = "1d4478f51bed434f1dadf96dcd9b43aabac66795"
version = "v1.7"
[[projects]]
name = "github.com/docker/distribution"
packages = [
".",
"digestset",
"metrics",
"reference",
"registry/api/errcode",
"registry/api/v2",
"registry/client",
"registry/client/auth/challenge",
"registry/client/transport",
"registry/storage/cache",
"registry/storage/cache/memory"
"reference"
]
revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5"
source = "github.com/docker/distribution"
@ -158,47 +140,92 @@
packages = [
"api",
"api/types",
"api/types/backend",
"api/types/blkiodev",
"api/types/container",
"api/types/events",
"api/types/filters",
"api/types/image",
"api/types/mount",
"api/types/network",
"api/types/plugins/logdriver",
"api/types/registry",
"api/types/strslice",
"api/types/swarm",
"api/types/swarm/runtime",
"api/types/time",
"api/types/versions",
"api/types/volume",
"builder",
"builder/dockerfile",
"builder/dockerfile/command",
"builder/dockerfile/instructions",
"builder/dockerfile/parser",
"builder/dockerfile/shell",
"client",
"builder/dockerignore",
"builder/fscache",
"builder/remotecontext",
"builder/remotecontext/git",
"container",
"container/stream",
"daemon/cluster/provider",
"daemon/exec",
"daemon/graphdriver",
"daemon/logger",
"daemon/logger/jsonfilelog",
"daemon/logger/jsonfilelog/jsonlog",
"daemon/logger/loggerutils",
"daemon/logger/loggerutils/multireader",
"daemon/logger/templates",
"daemon/network",
"dockerversion",
"errdefs",
"image",
"layer",
"oci",
"opts",
"pkg/archive",
"pkg/broadcaster",
"pkg/chrootarchive",
"pkg/containerfs",
"pkg/directory",
"pkg/discovery",
"pkg/discovery/kv",
"pkg/filenotify",
"pkg/fileutils",
"pkg/homedir",
"pkg/idtools",
"pkg/ioutils",
"pkg/jsonmessage",
"pkg/locker",
"pkg/longpath",
"pkg/mount",
"pkg/parsers",
"pkg/parsers/kernel",
"pkg/plugingetter",
"pkg/plugins",
"pkg/plugins/transport",
"pkg/pools",
"pkg/system"
"pkg/progress",
"pkg/pubsub",
"pkg/reexec",
"pkg/signal",
"pkg/streamformatter",
"pkg/stringid",
"pkg/symlink",
"pkg/sysinfo",
"pkg/system",
"pkg/tailfile",
"pkg/tarsum",
"pkg/term",
"pkg/term/windows",
"pkg/urlutil",
"pkg/useragent",
"plugin/v2",
"restartmanager",
"runconfig",
"runconfig/opts",
"volume",
"volume/mounts"
]
revision = "b1a1234c60cf87048814aa37da523b03a7b0d344"
revision = "dfde597fbbb5de4a7559a68980401c8c405aa9af"
source = "github.com/docker/docker"
[[projects]]
name = "github.com/docker/docker-credential-helpers"
packages = [
"client",
"credentials"
]
revision = "d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1"
version = "v0.6.0"
[[projects]]
name = "github.com/docker/go-connections"
packages = [
@ -209,6 +236,12 @@
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "github.com/docker/go-events"
packages = ["."]
revision = "9461782956ad83b30282bf90e31fa6a70c255ba9"
[[projects]]
branch = "master"
name = "github.com/docker/go-metrics"
@ -221,11 +254,90 @@
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
version = "v0.3.3"
[[projects]]
name = "github.com/docker/libkv"
packages = [
".",
"store",
"store/boltdb",
"store/consul",
"store/etcd",
"store/zookeeper"
]
revision = "aabc039ad04deb721e234f99cd1b4aa28ac71a40"
version = "v0.2.1"
[[projects]]
branch = "master"
name = "github.com/docker/libtrust"
name = "github.com/docker/libnetwork"
packages = [
".",
"bitseq",
"cluster",
"common",
"config",
"datastore",
"diagnostic",
"discoverapi",
"driverapi",
"drivers/bridge",
"drivers/host",
"drivers/ipvlan",
"drivers/macvlan",
"drivers/null",
"drivers/overlay",
"drivers/remote",
"drivers/remote/api",
"drivers/windows",
"drivers/windows/overlay",
"drvregistry",
"etchosts",
"hostdiscovery",
"idm",
"ipam",
"ipamapi",
"ipams/builtin",
"ipams/null",
"ipams/remote",
"ipams/remote/api",
"ipams/windowsipam",
"ipamutils",
"iptables",
"ipvs",
"netlabel",
"netutils",
"networkdb",
"ns",
"options",
"osl",
"portallocator",
"portmapper",
"resolvconf",
"resolvconf/dns",
"types"
]
revision = "d5818e7204d3886a246f3ea4ce39fb133067b194"
[[projects]]
branch = "master"
name = "github.com/docker/swarmkit"
packages = [
"agent/exec",
"api",
"api/deepcopy",
"api/equality",
"log",
"manager/raftselector",
"protobuf/plugin",
"protobuf/ptypes"
]
revision = "8aa9c33bcdff9ea38fc79e0b1d054199917513f3"
[[projects]]
name = "github.com/fsnotify/fsnotify"
packages = ["."]
revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]]
name = "github.com/genuinetools/amicontained"
@ -234,14 +346,20 @@
version = "v0.4.0"
[[projects]]
name = "github.com/ghodss/yaml"
name = "github.com/godbus/dbus"
packages = ["."]
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
revision = "a389bdde4dd695d414e47b755e95e72b7826432c"
version = "v4.1.0"
[[projects]]
name = "github.com/gogo/protobuf"
packages = ["proto"]
packages = [
"gogoproto",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types"
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
@ -255,8 +373,27 @@
"ptypes/duration",
"ptypes/timestamp"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/go-containerregistry"
packages = [
"authn",
"name",
"v1",
"v1/empty",
"v1/mutate",
"v1/partial",
"v1/random",
"v1/remote",
"v1/remote/transport",
"v1/tarball",
"v1/types",
"v1/v1util"
]
revision = "ee5a6c257df843b47a2666ff0fff3d31d484ebda"
[[projects]]
name = "github.com/googleapis/gax-go"
@ -265,22 +402,85 @@
version = "v2.0.0"
[[projects]]
name = "github.com/gorilla/context"
packages = ["."]
revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
version = "v1.1"
branch = "master"
name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"]
revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"
[[projects]]
name = "github.com/gorilla/mux"
packages = ["."]
revision = "53c1911da2b537f792e7cafcb446b05ffe33b996"
version = "v1.6.1"
name = "github.com/hashicorp/consul"
packages = ["api"]
revision = "fb848fc48818f58690db09d14640513aa6bf3c02"
version = "v1.0.7"
[[projects]]
name = "github.com/imdario/mergo"
branch = "master"
name = "github.com/hashicorp/errwrap"
packages = ["."]
revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c"
version = "v0.3.4"
revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-immutable-radix"
packages = ["."]
revision = "7f3cd4390caab3250a57f30efdb2a65dd7649ecf"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-memdb"
packages = ["."]
revision = "1289e7fffe71d8fd4d4d491ba9a412c50f244c44"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-msgpack"
packages = ["codec"]
revision = "fa3f63826f7c23912c15263591e65d54d080b458"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
revision = "b7773ae218740a7be65057fc60b366a49b538a44"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-sockaddr"
packages = ["."]
revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = ["simplelru"]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
name = "github.com/hashicorp/memberlist"
packages = ["."]
revision = "ce8abaa0c60c2d6bee7219f5ddf500e0a1457b28"
version = "v0.1.0"
[[projects]]
name = "github.com/hashicorp/serf"
packages = [
"coordinate",
"serf"
]
revision = "d6574a5bb1226678d7010325fb6c985db20ee458"
version = "v0.8.1"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
@ -289,10 +489,10 @@
version = "v1.0"
[[projects]]
name = "github.com/mattn/go-runewidth"
name = "github.com/ishidawataru/sctp"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
revision = "07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb"
source = "github.com/ishidawataru/sctp"
[[projects]]
name = "github.com/mattn/go-shellwords"
@ -307,16 +507,26 @@
version = "v1.0.0"
[[projects]]
name = "github.com/mistifyio/go-zfs"
name = "github.com/miekg/dns"
packages = ["."]
revision = "cdc0f941c4d0e0e94d85348285568d921891e138"
version = "v2.1.1"
revision = "83c435cc65d2862736428b9b4d07d0ab10ad3e4d"
version = "v1.0.5"
[[projects]]
branch = "master"
name = "github.com/mtrmac/gpgme"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
revision = "b2432428689ca58c2b8e8dea9449d3295cf96fc9"
revision = "b8bc1bf767474819792c23f32d8286a45736f1c6"
[[projects]]
branch = "master"
name = "github.com/moby/buildkit"
packages = [
"identity",
"session",
"session/filesync"
]
revision = "b6fee5e09d7aa62d1cd950b6bbee2d09d049e3fd"
[[projects]]
name = "github.com/opencontainers/go-digest"
@ -336,11 +546,20 @@
[[projects]]
name = "github.com/opencontainers/runc"
packages = [
"libcontainer/cgroups",
"libcontainer/configs",
"libcontainer/devices",
"libcontainer/system",
"libcontainer/user"
]
revision = "baf6536d6259209c3edfa2b22237af82942d3dfa"
version = "v0.1.1"
revision = "4fc53a81fb7c994640722ac585fa9ca548971871"
source = "github.com/opencontainers/runc"
[[projects]]
name = "github.com/opencontainers/runtime-spec"
packages = ["specs-go"]
revision = "4e3b9264a330d094b0386c3703c5f379119711e8"
version = "v1.0.1"
[[projects]]
name = "github.com/opencontainers/selinux"
@ -352,13 +571,14 @@
version = "v1.0.0-rc1"
[[projects]]
branch = "master"
name = "github.com/ostreedev/ostree-go"
name = "github.com/opentracing/opentracing-go"
packages = [
"pkg/glibobject",
"pkg/otbuiltin"
".",
"ext",
"log"
]
revision = "cb6250d5a6a240b509609915842f763fd87b819d"
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
name = "github.com/pkg/errors"
@ -366,15 +586,6 @@
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/pquerna/ffjson"
packages = [
"fflib/v1",
"fflib/v1/internal"
]
revision = "d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
@ -398,7 +609,7 @@
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "d0f7cd64bda49e08b22ae8a730aa57aa0db125d6"
revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c"
[[projects]]
branch = "master"
@ -411,6 +622,18 @@
]
revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e"
[[projects]]
branch = "master"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47"
[[projects]]
branch = "master"
name = "github.com/sean-/seed"
packages = ["."]
revision = "e2103e2c35297fb7e17febb81e49b312087a2372"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
@ -436,10 +659,16 @@
revision = "33e07d32887e1e06b7c025f27ce52f62c7990bc0"
[[projects]]
name = "github.com/tchap/go-patricia"
packages = ["patricia"]
revision = "666120de432aea38ab06bd5c818f04f4129882c9"
version = "v2.2.6"
branch = "master"
name = "github.com/tonistiigi/fsutil"
packages = ["."]
revision = "93a0fd10b669d389e349ff54c48f13829708c9b0"
[[projects]]
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
version = "v1.1.1"
[[projects]]
name = "github.com/vbatts/tar-split"
@ -451,6 +680,21 @@
revision = "38ec4ddb06dedbea0a895c4848b248eb38af221b"
version = "v0.10.2"
[[projects]]
name = "github.com/vishvananda/netlink"
packages = [
".",
"nl"
]
revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e"
source = "github.com/vishvananda/netlink"
[[projects]]
branch = "master"
name = "github.com/vishvananda/netns"
packages = ["."]
revision = "be1fbeda19366dea804f00efff2dd73a1642fdcc"
[[projects]]
name = "go.opencensus.io"
packages = [
@ -467,41 +711,40 @@
"trace/internal",
"trace/propagation"
]
revision = "0095aec66ae14801c6711210f6f0716411cefdd3"
version = "v0.8.0"
revision = "10cec2c05ea2cfb8b0d856711daedc49d8a45c56"
version = "v0.9.0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"cast5",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"ed25519",
"ed25519/internal/edwards25519",
"ssh/terminal"
]
revision = "e73bf333ef8920dbb52ad18d4bd38ad9d9bc76d7"
revision = "21052ae46654ecf18dfdba0f7c12701a1e2b3164"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"bpf",
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/iana",
"internal/socket",
"internal/socks",
"internal/timeseries",
"lex/httplex",
"ipv4",
"ipv6",
"proxy",
"trace"
]
revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23"
revision = "f73e4c9ed3b7ebdd5f699a16a880c2b1994e50dd"
[[projects]]
branch = "master"
@ -513,16 +756,27 @@
"jws",
"jwt"
]
revision = "6881fee410a5daf86371371f9ad451b95e168b71"
revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269"
[[projects]]
branch = "master"
name = "golang.org/x/sync"
packages = [
"errgroup",
"singleflight",
"syncmap"
]
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
"windows",
"windows/registry"
]
revision = "79b0c6888797020a994db17c8510466c72fe75d9"
revision = "7db1c3b1a98089d0071c84f646ff5c96aad43682"
[[projects]]
name = "golang.org/x/text"
@ -545,6 +799,12 @@
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/api"
@ -559,7 +819,7 @@
"storage/v1",
"transport/http"
]
revision = "b08f8989af4899034b9816f5291a5bbb1bd576bb"
revision = "4bd7f4beb291148443ed4553071c4e0697ff4afb"
[[projects]]
name = "google.golang.org/appengine"
@ -587,7 +847,7 @@
"googleapis/rpc/code",
"googleapis/rpc/status"
]
revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200"
revision = "86e600f69ee4704c6efbf6a2a40a5c10700e76c2"
[[projects]]
name = "google.golang.org/grpc"
@ -603,6 +863,8 @@
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"health",
"health/grpc_health_v1",
"internal",
"keepalive",
"metadata",
@ -616,14 +878,8 @@
"tap",
"transport"
]
revision = "d11072e7ca9811b1100b80ca0269ac831f06d024"
version = "v1.11.3"
[[projects]]
name = "gopkg.in/cheggaaa/pb.v1"
packages = ["."]
revision = "72b964305fba1230d3d818711138195f22b9ceea"
version = "v1.0.22"
revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0"
source = "google.golang.org/grpc"
[[projects]]
name = "gopkg.in/yaml.v2"
@ -631,15 +887,9 @@
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
name = "k8s.io/client-go"
packages = ["util/homedir"]
revision = "23781f4d6632d88e869066eaebb743857aa1ef9b"
version = "v7.0.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "6d64e71de2909767e347f36c90936a5f77da94f74fe2dbf84d6d44fdbcdf4fba"
inputs-digest = "7dc0b12e9e50b1e09cfb6bf10cbeca2299a3c12ef5dd1074666215f683969b10"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -18,16 +18,31 @@
revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5"
source = "github.com/docker/distribution"
[[constraint]]
name = "github.com/GoogleContainerTools/container-diff"
branch = "master"
source = "github.com/GoogleContainerTools/container-diff"
[[constraint]]
name = "github.com/docker/docker"
revision = "b1a1234c60cf87048814aa37da523b03a7b0d344"
revision = "dfde597fbbb5de4a7559a68980401c8c405aa9af"
source = "github.com/docker/docker"
[[override]]
name = "github.com/opencontainers/runc"
revision = "4fc53a81fb7c994640722ac585fa9ca548971871"
source = "github.com/opencontainers/runc"
[[override]]
name = "github.com/ishidawataru/sctp"
revision = "07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb"
source = "github.com/ishidawataru/sctp"
[[override]]
name = "github.com/vishvananda/netlink"
revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e"
source = "github.com/vishvananda/netlink"
[[override]]
name = "google.golang.org/grpc"
revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0"
source = "google.golang.org/grpc"
[[constraint]]
name = "github.com/genuinetools/amicontained"
version = "0.4.0"

View File

@ -34,22 +34,20 @@ GO_LDFLAGS += -X $(VERSION_PACKAGE).version=$(VERSION)
GO_LDFLAGS += -w -s # Drop debugging symbols.
GO_LDFLAGS += '
GO_BUILD_TAGS := "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs exclude_graphdriver_overlay"
EXECUTOR_PACKAGE = $(REPOPATH)/cmd/executor
KANIKO_PROJECT = $(REPOPATH)/kaniko
out/executor: $(GO_FILES)
GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -tags $(GO_BUILD_TAGS) -o $@ $(EXECUTOR_PACKAGE)
GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -o $@ $(EXECUTOR_PACKAGE)
.PHONY: test
test: out/executor
@ ./test.sh
.PHONY: integration-test
integration-test: out/executor
integration-test:
@ ./integration-test.sh
.PHONY: images
images: out/executor
images:
docker build -t $(REGISTRY)/executor:latest -f deploy/Dockerfile .

129
README.md
View File

@ -3,21 +3,23 @@
kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster.
kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace.
This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster.
This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster.
We're currently in the process of building kaniko, so as of now it isn't production ready.
Please let us know if you have any feature requests or find any bugs!
- [Kaniko](#kaniko)
- [How does kaniko work?](#how-does-kaniko-work?)
- [How does kaniko work?](#how-does-kaniko-work)
- [Known Issues](#known-issues)
- [Demo](#demo)
- [Development](#development)
- [kaniko Build Contexts](#kaniko-build-contexts)
- [kaniko Build Contexts](#kaniko-build-contexts)
- [Running kaniko in a Kubernetes cluster](#running-kaniko-in-a-kubernetes-cluster)
- [Running kaniko in Google Container Builder](#running-kaniko-in-google-container-builder)
- [Running kaniko locally](#running-kaniko-locally)
- [Pushing to Different Registries](#pushing-to-different-registries)
- [Debug Image](#debug-image)
- [Security](#security)
- [Comparison with Other Tools](#comparison-with-other-tools)
- [Community](#community)
@ -30,21 +32,17 @@ We then execute the commands in the Dockerfile, snapshotting the filesystem in u
After each command, we append a layer of changed files to the base image (if there are any) and update image metadata.
### Known Issues
kaniko does not support building Windows containers.
The majority of Dockerfile commands can be executed with kaniko, but we're still working on supporting the following commands:
## Demo
* SHELL
* HEALTHCHECK
* STOPSIGNAL
* ARG
Multi-Stage Dockerfiles are also unsupported currently, but will be ready soon.
![Demo](/docs/demo.gif)
## Development
### kaniko Build Contexts
kaniko supports local directories and GCS buckets as build contexts. To specify a local directory, pass in the `--context` flag as an argument to the executor image.
To specify a GCS bucket, pass in the `--bucket` flag.
The GCS bucket should contain a compressed tar of the build context called `context.tar.gz`, which kaniko will unpack and use as the build context.
The GCS bucket should contain a compressed tar of the build context called `context.tar.gz`, which kaniko will unpack and use as the build context.
To create `context.tar.gz`, run the following command:
```shell
@ -68,7 +66,7 @@ Requirements:
* Standard Kubernetes cluster
* Kubernetes Secret
To run kaniko in a Kubernetes cluster, you will need a standard running Kubernetes cluster and a Kubernetes secret, which contains the auth required to push the final image.
To run kaniko in a Kubernetes cluster, you will need a standard running Kubernetes cluster and a Kubernetes secret, which contains the auth required to push the final image.
To create the secret, first you will need to create a service account in the Google Cloud Console project you want to push the final image to, with `Storage Admin` permissions.
You can download a JSON key for this service account, and rename it `kaniko-secret.json`.
@ -108,7 +106,7 @@ spec:
This example pulls the build context from a GCS bucket.
To use a local directory build context, you could consider using configMaps to mount in small build contexts.
### Running kaniko in Google Container Builder
### Running kaniko in Google Container Builder
To run kaniko in GCB, add it to your build config as a build step:
```yaml
@ -141,7 +139,7 @@ To run kaniko in Docker, run the following command:
kaniko uses Docker credential helpers to push images to a registry.
kaniko comes with support for Docker `config.json` and GCR, but configuring another credential helper should allow pushing to a different registry.
kaniko comes with support for GCR, Docker `config.json` and Amazon ECR, but configuring another credential helper should allow pushing to a different registry.
#### Pushing to Docker Hub
@ -165,20 +163,81 @@ Run kaniko with the `config.json` inside `/root/.docker/config.json`
docker run -ti --rm -v `pwd`:/workspace -v config.json:/root/.docker/config.json:ro gcr.io/kaniko-project/executor:latest --dockerfile=Dockerfile --destination=yourimagename
#### Pushing to Amazon ECR
The Amazon ECR [credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) is built into the kaniko executor image.
To configure credentials, you will need to do the following:
1. Update the `credHelpers` section of [config.json](https://github.com/GoogleContainerTools/kaniko/blob/master/files/config.json) with the specific URI of your ECR registry:
```json
{
"credHelpers": {
"aws_account_id.dkr.ecr.region.amazonaws.com": "ecr-login"
}
}
```
You can mount in the new config as a configMap:
```shell
kubectl create configmap docker-config --from-file=<path to config.json>
```
2. Create a Kubernetes secret for your `~/.aws/credentials` file so that credentials can be accessed within the cluster.
To create the secret, run:
```shell
kubectl create secret generic aws-secret --from-file=<path to .aws/credentials>
```
The Kubernetes Pod spec should look similar to this, with the args parameters filled in:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: kaniko
spec:
containers:
- name: kaniko
image: gcr.io/kaniko-project/executor:latest
args: ["--dockerfile=<path to Dockerfile>",
"--context=<path to build context>",
"--destination=<aws_account_id.dkr.ecr.region.amazonaws.com/my-repository:my-tag>"]
volumeMounts:
- name: aws-secret
mountPath: /root/.aws/
- name: docker-config
mountPath: /root/.docker/
restartPolicy: Never
volumes:
- name: aws-secret
secret:
secretName: aws-secret
- name: docker-config
configMap:
name: docker-config
```
### Debug Image
The kaniko executor image is based off of scratch and doesn't contain a shell.
We provide `gcr.io/kaniko-project/executor:debug`, a debug image which consists of the kaniko executor image along with a busybox shell to enter.
You can launch the debug image with a shell entrypoint:
```shell
docker run -it --entrypoint=/busybox/sh gcr.io/kaniko-project/executor:debug
```
## Security
kaniko by itself **does not** make it safe to run untrusted builds inside your cluster, or anywhere else.
kaniko relies on the security features of your container runtime to provide build security.
The minimum permissions kaniko needs inside your container are governed by a few things:
* The permissions required to unpack your base image into its container
* The permissions required to execute the RUN commands inside the container
If you have a minimal base image (SCRATCH or similar) that doesn't require permissions to unpack, and your Dockerfile doesn't execute any commands as the root user,
you can run Kaniko without root permissions.
If you have a minimal base image (SCRATCH or similar) that doesn't require
permissions to unpack, and your Dockerfile doesn't execute any commands as the
root user, you can run Kaniko without root permissions. It should be noted that
Docker runs as root by default, so you still require (in a sense) privileges to
use Kaniko.
You may be able to achieve the same default seccomp profile that Docker uses in your Pod by setting [seccomp](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp) profiles with annotations on a [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) to create or update security policies on your cluster.
@ -187,21 +246,37 @@ You may be able to achieve the same default seccomp profile that Docker uses in
Similar tools include:
* [img](https://github.com/genuinetools/img)
* [orca-build](https://github.com/cyphar/orca-build)
* [umoci](https://github.com/openSUSE/umoci)
* [buildah](https://github.com/projectatomic/buildah)
* [FTL](https://github.com/GoogleCloudPlatform/runtimes-common/tree/master/ftl)
* [Bazel rules_docker](https://github.com/bazelbuild/rules_docker)
All of these tools build container images with different approaches.
`img` can perform as a non root user from within a container, but requires that the `img` container has `RawProc` access to create nested containers.
`kaniko` does not actually create nested containers, so it does not require `RawProc` access.
`img` can perform as a non root user from within a container, but requires that
the `img` container has `RawProc` access to create nested containers. `kaniko`
does not actually create nested containers, so it does not require `RawProc`
access.
`orca-build` depends on `runC` to build images from Dockerfiles, which can not run inside a container. `kaniko` doesn't use runC so it doesn't require the use of kernel namespacing techniques.
`orca-build` depends on `runc` to build images from Dockerfiles, which can not
run inside a container (for similar reasons to `img` above). `kaniko` doesn't
use `runc` so it doesn't require the use of kernel namespacing techniques.
However, `orca-build` does not require Docker or any privileged daemon (so
builds can be done entirely without privilege).
`buildah` requires the same privileges as a Docker daemon does to run, while `kaniko` runs without any special privileges or permissions.
`umoci` works without any privileges, and also has no restrictions on the root
filesystem being extracted (though it requires additional handling if your
filesystem is sufficiently complicated). However it has no `Dockerfile`-like
build tooling (it's a slightly lower-level tool that can be used to build such
builders -- such as `orca-build`).
`FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images for a subset of images.
These can be thought of as a special-case "fast path" that can be used in conjunction with the support for general Dockerfiles kaniko provides.
`buildah` requires the same privileges as a Docker daemon does to run, while
`kaniko` runs without any special privileges or permissions.
`FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images
for a subset of images. These can be thought of as a special-case "fast path"
that can be used in conjunction with the support for general Dockerfiles kaniko
provides.
## Community

43
cmd/executor/cmd/args.go Normal file
View File

@ -0,0 +1,43 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"github.com/sirupsen/logrus"
"strings"
)
// buildArg accumulates the values of repeated --build-arg flags. It
// implements the pflag.Value interface (String, Set, Type) so the same
// flag may be passed more than once on the command line.
type buildArg []string

// String renders every collected build arg as one comma-separated list.
func (b *buildArg) String() string {
	return strings.Join([]string(*b), ",")
}
// Set is called once for each occurrence of --build-arg on the command
// line and appends the raw value to the list.
func (b *buildArg) Set(value string) error {
	logrus.Infof("appending to build args %s", value)
	updated := append(*b, value)
	*b = updated
	return nil
}
// Type reports the value-type name pflag shows in usage text.
func (b *buildArg) Type() string {
	const name = "build-arg type"
	return name
}

View File

@ -21,12 +21,10 @@ import (
"os"
"path/filepath"
"github.com/genuinetools/amicontained/container"
"github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/executor"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/genuinetools/amicontained/container"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@ -40,6 +38,7 @@ var (
dockerInsecureSkipTLSVerify bool
logLevel string
force bool
buildArgs buildArg
)
func init() {
@ -47,7 +46,9 @@ func init() {
RootCmd.PersistentFlags().StringVarP(&srcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.")
RootCmd.PersistentFlags().StringVarP(&bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
RootCmd.PersistentFlags().StringVarP(&destination, "destination", "d", "", "Registry the final image should be pushed to (ex: gcr.io/test/example:latest)")
RootCmd.MarkPersistentFlagRequired("destination")
RootCmd.PersistentFlags().StringVarP(&snapshotMode, "snapshotMode", "", "full", "Set this flag to change the file attributes inspected during snapshotting")
RootCmd.PersistentFlags().VarP(&buildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
RootCmd.PersistentFlags().BoolVarP(&dockerInsecureSkipTLSVerify, "insecure-skip-tls-verify", "", false, "Push to insecure registry ignoring TLS verify")
RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic")
RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")
@ -72,7 +73,16 @@ var RootCmd = &cobra.Command{
}
logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
}
if err := executor.DoBuild(dockerfilePath, srcContext, destination, snapshotMode, dockerInsecureSkipTLSVerify); err != nil {
if err := os.Chdir("/"); err != nil {
logrus.Error(err)
os.Exit(1)
}
ref, image, err := executor.DoBuild(dockerfilePath, srcContext, snapshotMode, buildArgs)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
if err := executor.DoPush(ref, image, destination); err != nil {
logrus.Error(err)
os.Exit(1)
}

View File

@ -14,13 +14,26 @@
# Builds the static Go image to execute in a Kubernetes job
FROM golang:1.10
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
COPY . .
RUN make
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /usr/local/bin/
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.4.3.tar.gz
# Get Amazon ECR credential helper
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
FROM scratch
ADD out/executor /kaniko/executor
ADD files/ca-certificates.crt /kaniko/ssl/certs/
ADD files/docker-credential-gcr /usr/local/bin/
ADD files/config.json /root/.docker/
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor
COPY --from=0 /usr/local/bin/docker-credential-gcr /usr/local/bin/docker-credential-gcr
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /usr/local/bin/docker-credential-ecr-login
COPY files/ca-certificates.crt /kaniko/ssl/certs/
COPY files/config.json /root/.docker/
RUN ["docker-credential-gcr", "config", "--token-source=env"]
ENV HOME /root
ENV USER /root
ENV PATH /usr/local/bin
ENV SSL_CERT_DIR=/kaniko/ssl/certs
ENTRYPOINT ["/kaniko/executor"]

48
deploy/Dockerfile_debug Normal file
View File

@ -0,0 +1,48 @@
# Copyright 2018 Google, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Builds the static Go image to execute in a Kubernetes job
# Stage 0: Build the executor binary and get credential helpers
FROM golang:1.10
WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
COPY . .
RUN make
# Get GCR credential helper
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /usr/local/bin/
RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.4.3.tar.gz
# Get Amazon ECR credential helper
RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
# Stage 1: Get the busybox shell
FROM gcr.io/cloud-builders/bazel:latest
RUN git clone https://github.com/GoogleContainerTools/distroless.git
WORKDIR /distroless
RUN bazel build busybox:busybox.tar
RUN tar -C /distroless/bazel-genfiles/busybox/ -xf /distroless/bazel-genfiles/busybox/busybox.tar
# Stage 2: Assemble the minimal debug image from scratch: the executor,
# both credential helpers, and (unlike the release image) the busybox
# shell so users can exec into the container for debugging
FROM scratch
COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor
COPY --from=0 /usr/local/bin/docker-credential-gcr /usr/local/bin/docker-credential-gcr
COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /usr/local/bin/docker-credential-ecr-login
COPY --from=1 /distroless/bazel-genfiles/busybox/busybox/ /busybox/
COPY files/ca-certificates.crt /kaniko/ssl/certs/
COPY files/config.json /root/.docker/
RUN ["docker-credential-gcr", "config", "--token-source=env"]
ENV HOME /root
ENV USER /root
# /busybox is on PATH so its shell and utilities are reachable when debugging
ENV PATH /usr/local/bin:/busybox
ENV SSL_CERT_DIR=/kaniko/ssl/certs
ENTRYPOINT ["/kaniko/executor"]

View File

@ -1,25 +1,11 @@
steps:
# First, install make
- name: "gcr.io/google-appengine/debian9"
args: ["sh", "-c", "apt-get update && apt-get install -y make"]
volumes:
- name: "make"
path: "/usr/bin"
- name: "gcr.io/google-appengine/debian9"
args: ["sh", "-c", "cp -r . /kaniko/ && mkdir -p /workspace/go/src/github.com/GoogleContainerTools/ && cp -r /kaniko/ /workspace/go/src/github.com/GoogleContainerTools/"]
volumes:
- name: "make"
path: "/usr/bin"
# Then, build the binary
- name: "gcr.io/google-appengine/golang"
args: ["sh", "-c", "make"]
volumes:
- name: "make"
path: "/usr/bin"
dir: go/src/github.com/GoogleContainerTools/kaniko
env: ["GOPATH=/workspace/go/"]
# Then, build kaniko with kaniko
- name: "gcr.io/kaniko-project/executor:latest"
args: ["--dockerfile=/workspace/deploy/Dockerfile",
"--context=/workspace/go/src/github.com/GoogleContainerTools/kaniko/",
"--destination=gcr.io/kaniko-project/executor:${COMMIT_SHA}"]
# First, build kaniko
- name: "gcr.io/cloud-builders/docker"
args: ["build", "-f", "deploy/Dockerfile",
"-t", "gcr.io/kaniko-project/executor:${COMMIT_SHA}", "."]
# Then, we want to build kaniko:debug
- name: "gcr.io/cloud-builders/docker"
args: ["build", "-f", "deploy/Dockerfile_debug",
"-t", "gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}", "."]
images: ["gcr.io/kaniko-project/executor:${COMMIT_SHA}",
"gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}"]

BIN
docs/demo.gif Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 3.5 MiB

Binary file not shown.

View File

@ -14,5 +14,9 @@ ADD $contextenv/* /tmp/${contextenv}/
ADD context/tars/fil* /tars/
ADD context/tars/file.tar /tars_again
# Test with ARG
ARG file
COPY $file /arg
# Finally, test adding a remote URL, concurrently with a normal file
ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/

View File

@ -1,4 +1,4 @@
FROM gcr.io/distroless/base
FROM alpine:3.7
COPY context/foo foo
COPY context/foo /foodir/
COPY context/bar/b* bar/

View File

@ -0,0 +1,10 @@
# Stage 0: copy the entire build context into a distroless base
FROM gcr.io/distroless/base:latest
COPY . .
# Stage 1 (named "second"): copy from stage 0, resolving $foopath via ENV
FROM scratch as second
ENV foopath context/foo
COPY --from=0 $foopath context/b* /foo/
# Stage 2: copy from the named stage; destination comes from the "file" ARG
FROM gcr.io/distroless/base:latest
ARG file
COPY --from=second /foo $file

View File

@ -0,0 +1,3 @@
# Test that a directory created by ADD can then be moved with RUN mv
FROM busybox
ADD context/tars /tmp/tars
RUN mv /tmp/tars /foo

View File

@ -1,6 +1,7 @@
FROM gcr.io/kaniko-test/onbuild-base:latest
COPY context/foo foo
ENV dir /new/workdir/
ONBUILD RUN echo "onbuild" > /tmp/onbuild
ARG file
ONBUILD RUN echo "onbuild" > $file
ONBUILD RUN echo "onbuild 2" > ${dir}
ONBUILD WORKDIR /new/workdir

View File

@ -14,6 +14,10 @@
FROM gcr.io/google-appengine/debian9
RUN echo "hey" > /etc/foo
RUN apt-get update && apt-get install -y \
bzr \
cvs \
RUN echo "baz" > /etc/baz
RUN cp /etc/baz /etc/bar
RUN rm /etc/baz
# Test with ARG
ARG file
RUN echo "run" > $file

View File

@ -1,4 +1,14 @@
FROM scratch
ADD context/foo /foo
ENV hello hello
ADD context/foo /$hello
# First, make sure simple arg replacement works
ARG file
COPY $file /foo
# Check that setting a default value works
ARG file2=context/bar/bat
COPY $file2 /bat
# Check that overriding a default value works
ARG file3=context/bar/baz
COPY $file3 /baz
# Check that setting an ENV will override the ARG
ENV file context/bar/bam/bat
COPY $file /env

View File

@ -1,4 +1,4 @@
FROM gcr.io/google-appengine/debian9:latest
FROM gcr.io/google-appengine/debian9@sha256:6b3aa04751aa2ac3b0c7be4ee71148b66d693ad212ce6d3244bd2a2a147f314a
COPY context/foo foo
WORKDIR /test
# Test that this will be appended on to the previous command, to create /test/workdir
@ -11,3 +11,7 @@ ENV dir /another/new/dir
WORKDIR $dir/newdir
WORKDIR $dir/$doesntexist
WORKDIR /
# Test with ARG
ARG workdir
WORKDIR $workdir

View File

@ -0,0 +1,12 @@
[
{
"Image1": "gcr.io/kaniko-test/docker-test-multistage:latest",
"Image2": "gcr.io/kaniko-test/kaniko-test-multistage:latest",
"DiffType": "File",
"Diff": {
"Adds": null,
"Dels": null,
"Mods": null
}
}
]

View File

@ -0,0 +1,12 @@
[
{
"Image1": "gcr.io/kaniko-test/docker-test-mv-add:latest",
"Image2": "gcr.io/kaniko-test/kaniko-test-mv-add:latest",
"DiffType": "File",
"Diff": {
"Adds": null,
"Dels": null,
"Mods": null
}
}
]

View File

@ -6,43 +6,7 @@
"Diff": {
"Adds": null,
"Dels": null,
"Mods": [
{
"Name": "/var/log/dpkg.log",
"Size1": 57481,
"Size2": 57481
},
{
"Name": "/var/log/apt/term.log",
"Size1": 23671,
"Size2": 23671
},
{
"Name": "/var/cache/ldconfig/aux-cache",
"Size1": 8057,
"Size2": 8057
},
{
"Name": "/var/log/apt/history.log",
"Size1": 5661,
"Size2": 5661
},
{
"Name": "/var/log/alternatives.log",
"Size1": 2579,
"Size2": 2579
},
{
"Name": "/usr/lib/python2.7/dist-packages/keyrings/__init__.pyc",
"Size1": 140,
"Size2": 140
},
{
"Name": "/usr/lib/python2.7/dist-packages/lazr/__init__.pyc",
"Size1": 136,
"Size2": 136
}
]
"Mods": null
}
}
]

View File

@ -26,6 +26,7 @@ const (
executorImage = "executor-image"
dockerImage = "gcr.io/cloud-builders/docker"
ubuntuImage = "ubuntu"
structureTestImage = "gcr.io/gcp-runtimes/container-structure-test"
testRepo = "gcr.io/kaniko-test/"
dockerPrefix = "docker-"
kanikoPrefix = "kaniko-"
@ -46,6 +47,7 @@ var fileTests = []struct {
kanikoContextBucket bool
repo string
snapshotMode string
args []string
}{
{
description: "test extract filesystem",
@ -63,6 +65,9 @@ var fileTests = []struct {
dockerContext: dockerfilesPath,
kanikoContext: dockerfilesPath,
repo: "test-run",
args: []string{
"file=/file",
},
},
{
description: "test run no files changed",
@ -98,6 +103,9 @@ var fileTests = []struct {
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-workdir",
args: []string{
"workdir=/arg/workdir",
},
},
{
description: "test volume",
@ -114,6 +122,17 @@ var fileTests = []struct {
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-add",
args: []string{
"file=context/foo",
},
},
{
description: "test mv add",
dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_mv_add",
configPath: "/workspace/integration_tests/dockerfiles/config_test_mv_add.json",
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-mv-add",
},
{
description: "test registry",
@ -130,6 +149,9 @@ var fileTests = []struct {
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-onbuild",
args: []string{
"file=/tmp/onbuild",
},
},
{
description: "test scratch",
@ -138,6 +160,22 @@ var fileTests = []struct {
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-scratch",
args: []string{
"hello=hello-value",
"file=context/foo",
"file3=context/b*",
},
},
{
description: "test multistage",
dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_multistage",
configPath: "/workspace/integration_tests/dockerfiles/config_test_multistage.json",
dockerContext: buildcontextPath,
kanikoContext: buildcontextPath,
repo: "test-multistage",
args: []string{
"file=/foo2",
},
},
}
@ -197,15 +235,6 @@ func main() {
Name: ubuntuImage,
Args: []string{"chmod", "+x", "container-diff-linux-amd64"},
}
structureTestsStep := step{
Name: "gcr.io/cloud-builders/gsutil",
Args: []string{"cp", "gs://container-structure-test/latest/container-structure-test", "."},
}
structureTestPermissions := step{
Name: ubuntuImage,
Args: []string{"chmod", "+x", "container-structure-test"},
}
GCSBucketTarBuildContext := step{
Name: ubuntuImage,
Args: []string{"tar", "-C", "/workspace/integration_tests/", "-zcvf", "/workspace/context.tar.gz", "."},
@ -231,18 +260,23 @@ func main() {
Args: []string{"push", onbuildBaseImage},
}
y := testyaml{
Steps: []step{containerDiffStep, containerDiffPermissions, structureTestsStep, structureTestPermissions, GCSBucketTarBuildContext, uploadTarBuildContext, buildExecutorImage,
buildOnbuildImage, pushOnbuildBase},
Steps: []step{containerDiffStep, containerDiffPermissions, GCSBucketTarBuildContext,
uploadTarBuildContext, buildExecutorImage, buildOnbuildImage, pushOnbuildBase},
Timeout: "1200s",
}
for _, test := range fileTests {
// First, build the image with docker
dockerImageTag := testRepo + dockerPrefix + test.repo
var buildArgs []string
buildArgFlag := "--build-arg"
for _, arg := range test.args {
buildArgs = append(buildArgs, buildArgFlag)
buildArgs = append(buildArgs, arg)
}
dockerBuild := step{
Name: dockerImage,
Args: []string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.dockerContext},
Args: append([]string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.dockerContext}, buildArgs...),
}
// Then, build the image with kaniko
kanikoImage := testRepo + kanikoPrefix + test.repo
snapshotMode := ""
@ -255,7 +289,7 @@ func main() {
}
kaniko := step{
Name: executorImage,
Args: []string{"--destination", kanikoImage, "--dockerfile", test.dockerfilePath, contextFlag, test.kanikoContext, snapshotMode},
Args: append([]string{"--destination", kanikoImage, "--dockerfile", test.dockerfilePath, contextFlag, test.kanikoContext, snapshotMode}, buildArgs...),
}
// Pull the kaniko image
@ -280,7 +314,7 @@ func main() {
}
compareOutputs := step{
Name: ubuntuImage,
Args: []string{"cmp", test.configPath, containerDiffOutputFile},
Args: []string{"cmp", "-b", test.configPath, containerDiffOutputFile},
}
y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, containerDiff, catContainerDiffOutput, compareOutputs)
@ -307,20 +341,15 @@ func main() {
Args: []string{"pull", kanikoImage},
}
// Run structure tests on the kaniko and docker image
args := "container-structure-test -image " + kanikoImage + " " + test.structureTestYamlPath
structureTest := step{
Name: ubuntuImage,
Args: []string{"sh", "-c", args},
Env: []string{"PATH=/workspace:/bin"},
kanikoStructureTest := step{
Name: structureTestImage,
Args: []string{"test", "--image", kanikoImage, "--config", test.structureTestYamlPath},
}
args = "container-structure-test -image " + dockerImageTag + " " + test.structureTestYamlPath
dockerStructureTest := step{
Name: ubuntuImage,
Args: []string{"sh", "-c", args},
Env: []string{"PATH=/workspace:/bin"},
Name: structureTestImage,
Args: []string{"test", "--image", dockerImageTag, "--config", test.structureTestYamlPath},
}
y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, structureTest, dockerStructureTest)
y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, kanikoStructureTest, dockerStructureTest)
}
d, _ := yaml.Marshal(&y)

Binary file not shown.

After

Width:  |  Height:  |  Size: 9.0 KiB

BIN
logo/Kaniko-Logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 20 KiB

1
logo/README.md Normal file
View File

@ -0,0 +1 @@
Thank you @ggcarlosr for this awesome logo!

View File

@ -17,11 +17,13 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"path/filepath"
"strings"
"github.com/google/go-containerregistry/v1"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
)
@ -40,7 +42,7 @@ type AddCommand struct {
// - If dest doesn't end with a slash, the filepath is inferred to be <dest>/<filename>
// 2. If <src> is a local tar archive:
// -If <src> is a local tar archive, it is unpacked at the dest, as 'tar -x' would
func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
srcs := a.cmd.SourcesAndDest[:len(a.cmd.SourcesAndDest)-1]
dest := a.cmd.SourcesAndDest[len(a.cmd.SourcesAndDest)-1]
@ -48,7 +50,8 @@ func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error {
logrus.Infof("dest: %s", dest)
// First, resolve any environment replacement
resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, config.Env, true)
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, replacementEnvs, true)
if err != nil {
return err
}
@ -100,7 +103,7 @@ func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error {
},
buildcontext: a.buildcontext,
}
if err := copyCmd.ExecuteCommand(config); err != nil {
if err := copyCmd.ExecuteCommand(config, buildArgs); err != nil {
return err
}
a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...)

46
pkg/commands/arg.go Normal file
View File

@ -0,0 +1,46 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
"strings"
)
// ArgCommand implements the Dockerfile ARG instruction. It wraps the
// parsed instruction whose key (and optional default value) get recorded
// into the build-arg state when executed.
type ArgCommand struct {
	cmd *instructions.ArgCommand
}
// ExecuteCommand registers this ARG's key (and optional default value)
// with the build-arg state. ARG only changes build metadata; it touches
// neither the filesystem nor the image config.
func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
	// Log in the same "cmd: <NAME>" form the other commands use
	// (e.g. "cmd: CMD", "cmd: ENV", "cmd: EXPOSE").
	logrus.Info("cmd: ARG")
	buildArgs.AddArg(r.cmd.Key, r.cmd.Value)
	return nil
}
// FilesToSnapshot returns a non-nil empty list: ARG is a metadata-only
// command, and the DockerCommand contract reserves nil for "we don't know
// which files changed".
func (r *ArgCommand) FilesToSnapshot() []string {
	return make([]string, 0)
}
// CreatedBy returns the image-config history entry for this command:
// "ARG <key>", or "ARG <key>=<default>" when a default value was
// declared, matching how docker records ARG instructions in history.
func (r *ArgCommand) CreatedBy() string {
	entry := r.cmd.Key
	// Value is a *string; nil means no default was declared.
	if r.cmd.Value != nil {
		entry = entry + "=" + *r.cmd.Value
	}
	return strings.Join([]string{r.cmd.Name(), entry}, " ")
}

View File

@ -17,10 +17,12 @@ limitations under the License.
package commands
import (
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"strings"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
type CmdCommand struct {
@ -29,13 +31,18 @@ type CmdCommand struct {
// ExecuteCommand executes the CMD command
// Argument handling is the same as RUN.
func (c *CmdCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (c *CmdCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: CMD")
var newCommand []string
if c.cmd.PrependShell {
// This is the default shell on Linux
// TODO: Support shell command here
shell := []string{"/bin/sh", "-c"}
var shell []string
if len(config.Shell) > 0 {
shell = config.Shell
} else {
shell = append(shell, "/bin/sh", "-c")
}
newCommand = append(shell, strings.Join(c.cmd.CmdLine, " "))
} else {
newCommand = c.cmd.CmdLine

View File

@ -16,34 +16,32 @@ limitations under the License.
package commands
import (
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/strslice"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"testing"
)
var cmdTests = []struct {
prependShell bool
cmdLine []string
expectedCmd strslice.StrSlice
expectedCmd []string
}{
{
prependShell: true,
cmdLine: []string{"echo", "cmd1"},
expectedCmd: strslice.StrSlice{"/bin/sh", "-c", "echo cmd1"},
expectedCmd: []string{"/bin/sh", "-c", "echo cmd1"},
},
{
prependShell: false,
cmdLine: []string{"echo", "cmd2"},
expectedCmd: strslice.StrSlice{"echo", "cmd2"},
expectedCmd: []string{"echo", "cmd2"},
},
}
func TestExecuteCmd(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Cmd: nil,
}
@ -56,7 +54,7 @@ func TestExecuteCmd(t *testing.T) {
},
},
}
err := cmd.ExecuteCommand(cfg)
err := cmd.ExecuteCommand(cfg, nil)
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedCmd, cfg.Cmd)
}
}

View File

@ -17,8 +17,9 @@ limitations under the License.
package commands
import (
"github.com/containers/image/manifest"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -28,7 +29,7 @@ type DockerCommand interface {
// 1. Making required changes to the filesystem (ex. copying files for ADD/COPY or setting ENV variables)
// 2. Updating metadata fields in the config
// It should not change the config history.
ExecuteCommand(*manifest.Schema2Config) error
ExecuteCommand(*v1.Config, *dockerfile.BuildArgs) error
// The config history has a "created by" field, should return information about the command
CreatedBy() string
// A list of files to snapshot, empty for metadata commands or nil if we don't know
@ -61,6 +62,14 @@ func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, e
return &OnBuildCommand{cmd: c}, nil
case *instructions.VolumeCommand:
return &VolumeCommand{cmd: c}, nil
case *instructions.StopSignalCommand:
return &StopSignalCommand{cmd: c}, nil
case *instructions.ArgCommand:
return &ArgCommand{cmd: c}, nil
case *instructions.ShellCommand:
return &ShellCommand{cmd: c}, nil
case *instructions.HealthCheckCommand:
return &HealthCheckCommand{cmd: c}, nil
case *instructions.MaintainerCommand:
logrus.Warnf("%s is deprecated, skipping", cmd.Name())
return nil, nil

View File

@ -17,13 +17,16 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"os"
"path/filepath"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
type CopyCommand struct {
@ -32,15 +35,20 @@ type CopyCommand struct {
snapshotFiles []string
}
func (c *CopyCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
srcs := c.cmd.SourcesAndDest[:len(c.cmd.SourcesAndDest)-1]
dest := c.cmd.SourcesAndDest[len(c.cmd.SourcesAndDest)-1]
logrus.Infof("cmd: copy %s", srcs)
logrus.Infof("dest: %s", dest)
// Resolve from
if c.cmd.From != "" {
c.buildcontext = filepath.Join(constants.KanikoDir, c.cmd.From)
}
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
// First, resolve any environment replacement
resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.cmd.SourcesAndDest, config.Env, true)
resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.cmd.SourcesAndDest, replacementEnvs, true)
if err != nil {
return err
}
@ -57,11 +65,19 @@ func (c *CopyCommand) ExecuteCommand(config *manifest.Schema2Config) error {
if err != nil {
return err
}
destPath, err := util.DestinationFilepath(src, dest, config.WorkingDir)
cwd := config.WorkingDir
if cwd == "" {
cwd = constants.RootDir
}
destPath, err := util.DestinationFilepath(src, dest, cwd)
if err != nil {
return err
}
if fi.IsDir() {
if !filepath.IsAbs(dest) {
// we need to add '/' to the end to indicate the destination is a directory
dest = filepath.Join(cwd, dest) + "/"
}
if err := util.CopyDir(fullPath, dest); err != nil {
return err
}

View File

@ -17,10 +17,12 @@ limitations under the License.
package commands
import (
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"strings"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
type EntrypointCommand struct {
@ -28,13 +30,18 @@ type EntrypointCommand struct {
}
// ExecuteCommand handles command processing similar to CMD and RUN,
func (e *EntrypointCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (e *EntrypointCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: ENTRYPOINT")
var newCommand []string
if e.cmd.PrependShell {
// This is the default shell on Linux
// TODO: Support shell command here
shell := []string{"/bin/sh", "-c"}
var shell []string
if len(config.Shell) > 0 {
shell = config.Shell
} else {
shell = append(shell, "/bin/sh", "-c")
}
newCommand = append(shell, strings.Join(e.cmd.CmdLine, " "))
} else {
newCommand = e.cmd.CmdLine

View File

@ -19,31 +19,30 @@ import (
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/strslice"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
var entrypointTests = []struct {
prependShell bool
cmdLine []string
expectedCmd strslice.StrSlice
expectedCmd []string
}{
{
prependShell: true,
cmdLine: []string{"echo", "cmd1"},
expectedCmd: strslice.StrSlice{"/bin/sh", "-c", "echo cmd1"},
expectedCmd: []string{"/bin/sh", "-c", "echo cmd1"},
},
{
prependShell: false,
cmdLine: []string{"echo", "cmd2"},
expectedCmd: strslice.StrSlice{"echo", "cmd2"},
expectedCmd: []string{"echo", "cmd2"},
},
}
func TestEntrypointExecuteCmd(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Cmd: nil,
}
@ -56,7 +55,7 @@ func TestEntrypointExecuteCmd(t *testing.T) {
},
},
}
err := cmd.ExecuteCommand(cfg)
err := cmd.ExecuteCommand(cfg, nil)
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedCmd, cfg.Entrypoint)
}
}

View File

@ -17,11 +17,12 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -29,59 +30,11 @@ type EnvCommand struct {
cmd *instructions.EnvCommand
}
func (e *EnvCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (e *EnvCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: ENV")
newEnvs := e.cmd.Env
for index, pair := range newEnvs {
expandedKey, err := util.ResolveEnvironmentReplacement(pair.Key, config.Env, false)
if err != nil {
return err
}
expandedValue, err := util.ResolveEnvironmentReplacement(pair.Value, config.Env, false)
if err != nil {
return err
}
newEnvs[index] = instructions.KeyValuePair{
Key: expandedKey,
Value: expandedValue,
}
}
return updateConfigEnv(newEnvs, config)
}
func updateConfigEnv(newEnvs []instructions.KeyValuePair, config *manifest.Schema2Config) error {
// First, convert config.Env array to []instruction.KeyValuePair
var kvps []instructions.KeyValuePair
for _, env := range config.Env {
entry := strings.Split(env, "=")
kvps = append(kvps, instructions.KeyValuePair{
Key: entry[0],
Value: entry[1],
})
}
// Iterate through new environment variables, and replace existing keys
// We can't use a map because we need to preserve the order of the environment variables
Loop:
for _, newEnv := range newEnvs {
for index, kvp := range kvps {
// If key exists, replace the KeyValuePair...
if kvp.Key == newEnv.Key {
logrus.Debugf("Replacing environment variable %v with %v in config", kvp, newEnv)
kvps[index] = newEnv
continue Loop
}
}
// ... Else, append it as a new env variable
kvps = append(kvps, newEnv)
}
// Convert back to array and set in config
envArray := []string{}
for _, kvp := range kvps {
entry := kvp.Key + "=" + kvp.Value
envArray = append(envArray, entry)
}
config.Env = envArray
return nil
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
return util.UpdateConfigEnv(newEnvs, config, replacementEnvs)
}
// We know that no files have changed, so return an empty array

View File

@ -16,46 +16,15 @@ limitations under the License.
package commands
import (
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"testing"
)
func TestUpdateEnvConfig(t *testing.T) {
cfg := &manifest.Schema2Config{
Env: []string{
"PATH=/path/to/dir",
"hey=hey",
},
}
newEnvs := []instructions.KeyValuePair{
{
Key: "foo",
Value: "foo2",
},
{
Key: "PATH",
Value: "/new/path/",
},
{
Key: "foo",
Value: "newfoo",
},
}
expectedEnvArray := []string{
"PATH=/new/path/",
"hey=hey",
"foo=newfoo",
}
updateConfigEnv(newEnvs, cfg)
testutil.CheckErrorAndDeepEqual(t, false, nil, expectedEnvArray, cfg.Env)
}
func Test_EnvExecute(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Env: []string{
"path=/usr/",
"home=/root",
@ -77,6 +46,10 @@ func Test_EnvExecute(t *testing.T) {
Key: "$path",
Value: "$home/",
},
{
Key: "$buildArg1",
Value: "$buildArg2",
},
},
},
}
@ -86,7 +59,20 @@ func Test_EnvExecute(t *testing.T) {
"home=/root",
"HOME=/root",
"/usr/=/root/",
"foo=foo2",
}
err := envCmd.ExecuteCommand(cfg)
buildArgs := setUpBuildArgs()
err := envCmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, err, expectedEnvs, cfg.Env)
}
func setUpBuildArgs() *dockerfile.BuildArgs {
buildArgs := dockerfile.NewBuildArgs([]string{
"buildArg1=foo",
"buildArg2=foo2",
})
buildArgs.AddArg("buildArg1", nil)
d := "default"
buildArgs.AddArg("buildArg2", &d)
return buildArgs
}

View File

@ -18,11 +18,12 @@ package commands
import (
"fmt"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -30,17 +31,18 @@ type ExposeCommand struct {
cmd *instructions.ExposeCommand
}
func (r *ExposeCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (r *ExposeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: EXPOSE")
// Grab the currently exposed ports
existingPorts := config.ExposedPorts
if existingPorts == nil {
existingPorts = make(map[manifest.Schema2Port]struct{})
existingPorts = make(map[string]struct{})
}
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
// Add any new ones in
for _, p := range r.cmd.Ports {
// Resolve any environment variables
p, err := util.ResolveEnvironmentReplacement(p, config.Env, false)
p, err := util.ResolveEnvironmentReplacement(p, replacementEnvs, false)
if err != nil {
return err
}
@ -53,8 +55,7 @@ func (r *ExposeCommand) ExecuteCommand(config *manifest.Schema2Config) error {
return fmt.Errorf("Invalid protocol: %s", protocol)
}
logrus.Infof("Adding exposed port: %s", p)
var x struct{}
existingPorts[manifest.Schema2Port(p)] = x
existingPorts[p] = struct{}{}
}
config.ExposedPorts = existingPorts
return nil

View File

@ -17,16 +17,17 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
func TestUpdateExposedPorts(t *testing.T) {
cfg := &manifest.Schema2Config{
ExposedPorts: manifest.Schema2PortSet{
cfg := &v1.Config{
ExposedPorts: map[string]struct{}{
"8080/tcp": {},
},
Env: []string{
@ -51,7 +52,7 @@ func TestUpdateExposedPorts(t *testing.T) {
},
}
expectedPorts := manifest.Schema2PortSet{
expectedPorts := map[string]struct{}{
"8080/tcp": {},
"8081/tcp": {},
"8082/tcp": {},
@ -60,14 +61,14 @@ func TestUpdateExposedPorts(t *testing.T) {
"8085/tcp": {},
"8085/udp": {},
}
err := exposeCmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
err := exposeCmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, err, expectedPorts, cfg.ExposedPorts)
}
func TestInvalidProtocol(t *testing.T) {
cfg := &manifest.Schema2Config{
ExposedPorts: manifest.Schema2PortSet{},
cfg := &v1.Config{
ExposedPorts: map[string]struct{}{},
}
ports := []string{
@ -79,7 +80,7 @@ func TestInvalidProtocol(t *testing.T) {
Ports: ports,
},
}
err := exposeCmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
err := exposeCmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, true, err, nil, nil)
}

View File

@ -0,0 +1,52 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
// HealthCheckCommand implements the Dockerfile HEALTHCHECK instruction.
type HealthCheckCommand struct {
	cmd *instructions.HealthCheckCommand
}
// ExecuteCommand copies the health check settings from the instruction into
// the image config. This is a metadata-only command; buildArgs is unused.
func (h *HealthCheckCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
	logrus.Info("cmd: HEALTHCHECK")
	healthConfig := v1.HealthConfig(*h.cmd.Health)
	config.Healthcheck = &healthConfig
	return nil
}
// FilesToSnapshot reports no files, since HEALTHCHECK only changes metadata.
func (h *HealthCheckCommand) FilesToSnapshot() (files []string) {
	files = []string{}
	return
}
// CreatedBy describes this command for the image config history.
func (h *HealthCheckCommand) CreatedBy() string {
	return strings.Join([]string{"HEALTHCHECK", strings.Join(h.cmd.Health.Test, " ")}, " ")
}

View File

@ -17,11 +17,12 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -29,24 +30,29 @@ type LabelCommand struct {
cmd *instructions.LabelCommand
}
func (r *LabelCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (r *LabelCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: LABEL")
return updateLabels(r.cmd.Labels, config)
return updateLabels(r.cmd.Labels, config, buildArgs)
}
func updateLabels(labels []instructions.KeyValuePair, config *manifest.Schema2Config) error {
func updateLabels(labels []instructions.KeyValuePair, config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
existingLabels := config.Labels
if existingLabels == nil {
existingLabels = make(map[string]string)
}
// Let's unescape values before setting the label
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
for index, kvp := range labels {
unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, []string{}, false)
key, err := util.ResolveEnvironmentReplacement(kvp.Key, replacementEnvs, false)
if err != nil {
return err
}
unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, replacementEnvs, false)
if err != nil {
return err
}
labels[index] = instructions.KeyValuePair{
Key: kvp.Key,
Key: key,
Value: unescaped,
}
}

View File

@ -17,15 +17,15 @@ limitations under the License.
package commands
import (
"testing"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"testing"
)
func TestUpdateLabels(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Labels: map[string]string{
"foo": "bar",
},
@ -48,14 +48,25 @@ func TestUpdateLabels(t *testing.T) {
Key: "backslashes",
Value: "lots\\\\ of\\\\ words",
},
{
Key: "$label",
Value: "foo",
},
}
expectedLabels := map[string]string{
"foo": "override",
"bar": "baz",
"multiword": "lots of words",
"backslashes": "lots\\ of\\ words",
arguments := []string{
"label=build_arg_label",
}
updateLabels(labels, cfg)
buildArgs := dockerfile.NewBuildArgs(arguments)
buildArgs.AddArg("label", nil)
expectedLabels := map[string]string{
"foo": "override",
"bar": "baz",
"multiword": "lots of words",
"backslashes": "lots\\ of\\ words",
"build_arg_label": "foo",
}
updateLabels(labels, cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, nil, expectedLabels, cfg.Labels)
}

View File

@ -17,9 +17,10 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -28,10 +29,11 @@ type OnBuildCommand struct {
}
//ExecuteCommand adds the specified expression in Onbuild to the config
func (o *OnBuildCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (o *OnBuildCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: ONBUILD")
logrus.Infof("args: %s", o.cmd.Expression)
resolvedExpression, err := util.ResolveEnvironmentReplacement(o.cmd.Expression, config.Env, false)
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedExpression, err := util.ResolveEnvironmentReplacement(o.cmd.Expression, replacementEnvs, false)
if err != nil {
return err
}

View File

@ -17,11 +17,12 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
var onbuildTests = []struct {
@ -50,7 +51,7 @@ var onbuildTests = []struct {
func TestExecuteOnbuild(t *testing.T) {
for _, test := range onbuildTests {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Env: []string{
"dir=/some/dir",
},
@ -62,8 +63,8 @@ func TestExecuteOnbuild(t *testing.T) {
Expression: test.expression,
},
}
err := onbuildCmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
err := onbuildCmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedArray, cfg.OnBuild)
}

View File

@ -17,27 +17,32 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
"os"
"os/exec"
"strconv"
"strings"
"syscall"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
)
type RunCommand struct {
cmd *instructions.RunCommand
}
func (r *RunCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
var newCommand []string
if r.cmd.PrependShell {
// This is the default shell on Linux
// TODO: Support shell command here
shell := []string{"/bin/sh", "-c"}
var shell []string
if len(config.Shell) > 0 {
shell = config.Shell
} else {
shell = append(shell, "/bin/sh", "-c")
}
newCommand = append(shell, strings.Join(r.cmd.CmdLine, " "))
} else {
newCommand = r.cmd.CmdLine
@ -49,7 +54,8 @@ func (r *RunCommand) ExecuteCommand(config *manifest.Schema2Config) error {
cmd := exec.Command(newCommand[0], newCommand[1:]...)
cmd.Dir = config.WorkingDir
cmd.Stdout = os.Stdout
cmd.Env = config.Env
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
cmd.Env = replacementEnvs
// If specified, run the command as a specific user
if config.User != "" {

54
pkg/commands/shell.go Normal file
View File

@ -0,0 +1,54 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
"strings"
)
// ShellCommand implements the Dockerfile SHELL instruction.
type ShellCommand struct {
	cmd *instructions.ShellCommand
}
// ExecuteCommand records the shell from the SHELL instruction in the image
// config, where later shell-form commands can pick it up. This is a
// metadata-only command; buildArgs is unused.
func (s *ShellCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
	logrus.Info("cmd: SHELL")
	// The original declared an intermediate `newShell` variable and assigned
	// it separately; the instruction's shell can be stored directly.
	logrus.Infof("Replacing Shell in config with %v", s.cmd.Shell)
	config.Shell = s.cmd.Shell
	return nil
}
// FilesToSnapshot reports no files, since SHELL only changes metadata.
func (s *ShellCommand) FilesToSnapshot() []string {
	var files = []string{}
	return files
}
// CreatedBy describes this command for the image config history.
func (s *ShellCommand) CreatedBy() string {
	return "SHELL " + strings.Join(s.cmd.Shell, " ")
}

View File

@ -0,0 +1,55 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
// shellTests pairs a SHELL command line with the shell expected to end up
// in the image config (currently a straight pass-through).
var shellTests = []struct {
	cmdLine       []string
	expectedShell []string
}{
	{
		cmdLine:       []string{"/bin/bash", "-c"},
		expectedShell: []string{"/bin/bash", "-c"},
	},
	{
		cmdLine:       []string{"/bin/bash"},
		expectedShell: []string{"/bin/bash"},
	},
}
// TestShellExecuteCmd verifies that SHELL replaces the shell recorded in the
// image config for each test case.
func TestShellExecuteCmd(t *testing.T) {
	cfg := &v1.Config{
		Shell: nil,
	}
	for _, tc := range shellTests {
		shellCmd := ShellCommand{
			&instructions.ShellCommand{
				Shell: tc.cmdLine,
			},
		}
		err := shellCmd.ExecuteCommand(cfg, nil)
		testutil.CheckErrorAndDeepEqual(t, false, err, tc.expectedShell, cfg.Shell)
	}
}

View File

@ -0,0 +1,66 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/pkg/signal"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
"strings"
)
// StopSignalCommand implements the Dockerfile STOPSIGNAL instruction.
type StopSignalCommand struct {
	cmd *instructions.StopSignalCommand
}
// ExecuteCommand resolves and validates the stop signal, then records it in
// the image config.
func (s *StopSignalCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
	logrus.Info("cmd: STOPSIGNAL")
	// Expand any environment references (e.g. ${STOPSIG}) in the signal name.
	envs := buildArgs.ReplacementEnvs(config.Env)
	resolved, err := util.ResolveEnvironmentReplacementList([]string{s.cmd.Signal}, envs, false)
	if err != nil {
		return err
	}
	stopSignal := resolved[0]
	// Reject anything that is not a recognized signal name or number.
	if _, err := signal.ParseSignal(stopSignal); err != nil {
		return err
	}
	logrus.Infof("Replacing StopSignal in config with %v", stopSignal)
	config.StopSignal = stopSignal
	return nil
}
// FilesToSnapshot reports no files, since STOPSIGNAL only changes metadata.
func (s *StopSignalCommand) FilesToSnapshot() (files []string) {
	files = []string{}
	return
}
// CreatedBy describes this command for the image config history.
func (s *StopSignalCommand) CreatedBy() string {
	return "STOPSIGNAL " + s.cmd.Signal
}

View File

@ -0,0 +1,61 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"testing"
)
// stopsignalTests covers a literal signal name, an environment reference
// (resolved against STOPSIG=SIGKILL set up in the test config), and a
// numeric signal.
var stopsignalTests = []struct {
	signal         string
	expectedSignal string
}{
	{
		signal:         "SIGKILL",
		expectedSignal: "SIGKILL",
	},
	{
		signal:         "${STOPSIG}",
		expectedSignal: "SIGKILL",
	},
	{
		signal:         "1",
		expectedSignal: "1",
	},
}
// TestStopsignalExecuteCmd verifies that signal names, environment
// references, and numeric signals all end up in config.StopSignal.
func TestStopsignalExecuteCmd(t *testing.T) {
	cfg := &v1.Config{
		StopSignal: "",
		Env:        []string{"STOPSIG=SIGKILL"},
	}
	// ExecuteCommand only reads from the build args, so one instance can be
	// shared across cases.
	buildArgs := dockerfile.NewBuildArgs([]string{})
	for _, tc := range stopsignalTests {
		stopCmd := StopSignalCommand{
			&instructions.StopSignalCommand{
				Signal: tc.signal,
			},
		}
		err := stopCmd.ExecuteCommand(cfg, buildArgs)
		testutil.CheckErrorAndDeepEqual(t, false, err, tc.expectedSignal, cfg.StopSignal)
	}
}

View File

@ -17,30 +17,31 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
"os/user"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
)
type UserCommand struct {
cmd *instructions.UserCommand
}
func (r *UserCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: USER")
u := r.cmd.User
userAndGroup := strings.Split(u, ":")
userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], config.Env, false)
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false)
if err != nil {
return err
}
var groupStr string
if len(userAndGroup) > 1 {
groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], config.Env, false)
groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false)
if err != nil {
return err
}

View File

@ -16,11 +16,12 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
var userTests = []struct {
@ -82,7 +83,7 @@ var userTests = []struct {
func TestUpdateUser(t *testing.T) {
for _, test := range userTests {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Env: []string{
"envuser=root",
"envgroup=root",
@ -93,7 +94,8 @@ func TestUpdateUser(t *testing.T) {
User: test.user,
},
}
err := cmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
err := cmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUid, cfg.User)
}
}

View File

@ -17,12 +17,13 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"os"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -31,10 +32,11 @@ type VolumeCommand struct {
snapshotFiles []string
}
func (v *VolumeCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: VOLUME")
volumes := v.cmd.Volumes
resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, config.Env, true)
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, replacementEnvs, true)
if err != nil {
return err
}

View File

@ -16,15 +16,16 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
func TestUpdateVolume(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
Env: []string{
"VOLUME=/etc",
},
@ -49,7 +50,7 @@ func TestUpdateVolume(t *testing.T) {
"/var/lib": {},
"/etc": {},
}
err := volumeCmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
err := volumeCmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, err, expectedVolumes, cfg.Volumes)
}

View File

@ -17,12 +17,13 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"os"
"path/filepath"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
"github.com/sirupsen/logrus"
)
@ -31,10 +32,11 @@ type WorkdirCommand struct {
snapshotFiles []string
}
func (w *WorkdirCommand) ExecuteCommand(config *manifest.Schema2Config) error {
func (w *WorkdirCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
logrus.Info("cmd: workdir")
workdirPath := w.cmd.Path
resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, config.Env, true)
replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, replacementEnvs, true)
if err != nil {
return err
}

View File

@ -16,11 +16,12 @@ limitations under the License.
package commands
import (
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"testing"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/google/go-containerregistry/v1"
)
// Each test here changes the same WorkingDir field in the config
@ -63,7 +64,7 @@ var workdirTests = []struct {
func TestWorkdirCommand(t *testing.T) {
cfg := &manifest.Schema2Config{
cfg := &v1.Config{
WorkingDir: "/",
Env: []string{
"path=usr/",
@ -78,7 +79,8 @@ func TestWorkdirCommand(t *testing.T) {
},
snapshotFiles: []string{},
}
cmd.ExecuteCommand(cfg)
buildArgs := dockerfile.NewBuildArgs([]string{})
cmd.ExecuteCommand(cfg, buildArgs)
testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedPath, cfg.WorkingDir)
}
}

View File

@ -0,0 +1,47 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockerfile
import (
d "github.com/docker/docker/builder/dockerfile"
"strings"
)
// BuildArgs wraps Docker's build-arg bookkeeping so kaniko can track
// declared ARGs and the values supplied on the command line.
type BuildArgs struct {
	d.BuildArgs
}
// NewBuildArgs parses "key" or "key=value" strings (as passed via --build-arg)
// into a BuildArgs. A bare "key" maps to a nil value, meaning the value is
// taken from the ARG default or the environment.
func NewBuildArgs(args []string) *BuildArgs {
	argsFromOptions := make(map[string]*string)
	for _, a := range args {
		// Split on the first '=' only, so values may themselves contain '='
		// (e.g. "url=http://host?a=b"). Plain Split would silently drop
		// everything after the second '='.
		s := strings.SplitN(a, "=", 2)
		if len(s) == 1 {
			argsFromOptions[s[0]] = nil
		} else {
			value := s[1]
			argsFromOptions[s[0]] = &value
		}
	}
	return &BuildArgs{
		*d.NewBuildArgs(argsFromOptions),
	}
}
// ReplacementEnvs returns the given environment plus the allowed build args,
// filtered so that true environment variables take precedence over args of
// the same name.
func (b *BuildArgs) ReplacementEnvs(envs []string) []string {
	filtered := b.FilterAllowed(envs)
	// Build a fresh slice instead of appending to the caller's slice: if envs
	// has spare capacity, append(envs, ...) would write into (and corrupt)
	// the caller's backing array.
	combined := make([]string, 0, len(envs)+len(filtered))
	combined = append(combined, envs...)
	return append(combined, filtered...)
}

View File

@ -18,10 +18,19 @@ package dockerfile
import (
"bytes"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/google/go-containerregistry/authn"
"github.com/google/go-containerregistry/name"
"github.com/google/go-containerregistry/v1"
"github.com/google/go-containerregistry/v1/empty"
"github.com/google/go-containerregistry/v1/remote"
"net/http"
"path/filepath"
"strconv"
"strings"
)
// Parse parses the contents of a Dockerfile and returns a list of commands
@ -37,6 +46,28 @@ func Parse(b []byte) ([]instructions.Stage, error) {
return stages, err
}
// ResolveStages rewrites COPY --from references that use stage names into the
// corresponding stage index (e.g. --from=second_stage becomes --from=1) so
// later processing only has to deal with indices.
func ResolveStages(stages []instructions.Stage) {
	nameToIndex := make(map[string]string)
	for i, stage := range stages {
		index := strconv.Itoa(i)
		// Only named stages need an entry; unnamed stages already resolve
		// to their own index.
		if stage.Name != index {
			nameToIndex[stage.Name] = index
		}
		for _, cmd := range stage.Commands {
			copyCmd, ok := cmd.(*instructions.CopyCommand)
			if !ok || copyCmd.From == "" {
				continue
			}
			if resolved, found := nameToIndex[copyCmd.From]; found {
				copyCmd.From = resolved
			}
		}
	}
}
// ParseCommands parses an array of commands into an array of instructions.Command; used for onbuild
func ParseCommands(cmdArray []string) ([]instructions.Command, error) {
var cmds []instructions.Command
@ -54,3 +85,69 @@ func ParseCommands(cmdArray []string) ([]instructions.Command, error) {
}
return cmds, nil
}
// Dependencies returns a list of files in this stage that will be needed in later stages
func Dependencies(index int, stages []instructions.Stage, buildArgs *BuildArgs) ([]string, error) {
	var dependencies []string
	for stageIndex, stage := range stages {
		// Only stages after `index` can copy files out of it.
		if stageIndex <= index {
			continue
		}
		var sourceImage v1.Image
		if stage.BaseName == constants.NoBaseImage {
			// FROM scratch: no base image config to fetch.
			sourceImage = empty.Image
		} else {
			// Initialize source image
			// NOTE(review): this fetches image config over the network for
			// every later stage; confirm callers are ok with that cost.
			ref, err := name.ParseReference(stage.BaseName, name.WeakValidation)
			if err != nil {
				return nil, err
			}
			auth, err := authn.DefaultKeychain.Resolve(ref.Context().Registry)
			if err != nil {
				return nil, err
			}
			sourceImage, err = remote.Image(ref, auth, http.DefaultTransport)
			if err != nil {
				return nil, err
			}
		}
		imageConfig, err := sourceImage.ConfigFile()
		if err != nil {
			return nil, err
		}
		for _, cmd := range stage.Commands {
			switch c := cmd.(type) {
			case *instructions.EnvCommand:
				// Track ENV so later $var references in COPY resolve correctly.
				replacementEnvs := buildArgs.ReplacementEnvs(imageConfig.Config.Env)
				if err := util.UpdateConfigEnv(c.Env, &imageConfig.Config, replacementEnvs); err != nil {
					return nil, err
				}
			case *instructions.ArgCommand:
				// Track ARG declarations so they participate in replacement.
				buildArgs.AddArg(c.Key, c.Value)
			case *instructions.CopyCommand:
				// Only COPY --from=<index> pulls files out of our stage;
				// ResolveStages has already mapped names to indices.
				if c.From != strconv.Itoa(index) {
					continue
				}
				// First, resolve any environment replacement
				replacementEnvs := buildArgs.ReplacementEnvs(imageConfig.Config.Env)
				resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.SourcesAndDest, replacementEnvs, true)
				if err != nil {
					return nil, err
				}
				// Resolve wildcards and get a list of resolved sources
				srcs, err := util.ResolveSources(resolvedEnvs, constants.RootDir)
				if err != nil {
					return nil, err
				}
				// NOTE(review): this loop variable shadows the stage `index`
				// parameter; harmless here but easy to misread.
				for index, src := range srcs {
					if !filepath.IsAbs(src) {
						srcs[index] = filepath.Join(constants.RootDir, src)
					}
				}
				dependencies = append(dependencies, srcs...)
			}
		}
	}
	return dependencies, nil
}

View File

@ -0,0 +1,136 @@
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockerfile
import (
"fmt"
"github.com/GoogleContainerTools/kaniko/testutil"
"github.com/docker/docker/builder/dockerfile/instructions"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
)
// Test_ResolveStages checks that named-stage references in COPY --from are
// rewritten to the index of the previous stage.
func Test_ResolveStages(t *testing.T) {
	dockerfile := `
	FROM scratch
	RUN echo hi > /hi
	FROM scratch AS second
	COPY --from=0 /hi /hi2
	FROM scratch
	COPY --from=second /hi2 /hi3
	`
	stages, err := Parse([]byte(dockerfile))
	if err != nil {
		t.Fatal(err)
	}
	ResolveStages(stages)
	for index, stage := range stages {
		// Stage 0 has no COPY --from to verify.
		if index == 0 {
			continue
		}
		// In this Dockerfile each later stage copies from the stage before it.
		copyCmd := stage.Commands[0].(*instructions.CopyCommand)
		expectedStage := strconv.Itoa(index - 1)
		if copyCmd.From != expectedStage {
			t.Fatalf("unexpected copy command: %s resolved to stage %s, expected %s", copyCmd.String(), copyCmd.From, expectedStage)
		}
	}
}
// Test_Dependencies checks that files pulled via COPY --from out of an
// earlier stage (including $env-resolved paths) are reported as that stage's
// dependencies, and that the final stage reports none.
func Test_Dependencies(t *testing.T) {
	testDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	// Clean up the temp directory; the original version leaked it.
	defer os.RemoveAll(testDir)
	helloPath := filepath.Join(testDir, "hello")
	if err := os.Mkdir(helloPath, 0755); err != nil {
		t.Fatal(err)
	}
	dockerfile := fmt.Sprintf(`
	FROM scratch
	COPY %s %s
	FROM scratch AS second
	ENV hienv %s
	COPY a b
	COPY --from=0 /$hienv %s /hi2/
	`, helloPath, helloPath, helloPath, testDir)
	stages, err := Parse([]byte(dockerfile))
	if err != nil {
		t.Fatal(err)
	}
	// Stage 0 is depended on for helloPath (via $hienv) and testDir; the
	// last stage has no later stages, so its dependencies are nil.
	expectedDependencies := [][]string{
		{
			helloPath,
			testDir,
		},
		nil,
	}
	for index := range stages {
		buildArgs := NewBuildArgs([]string{})
		actualDeps, err := Dependencies(index, stages, buildArgs)
		testutil.CheckErrorAndDeepEqual(t, false, err, expectedDependencies[index], actualDeps)
	}
}
// Test_DependenciesWithArg is like Test_Dependencies, but the path copied
// out of stage 0 is supplied through an ARG instead of an ENV.
func Test_DependenciesWithArg(t *testing.T) {
	testDir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	// Clean up the temp directory; the original version leaked it.
	defer os.RemoveAll(testDir)
	helloPath := filepath.Join(testDir, "hello")
	if err := os.Mkdir(helloPath, 0755); err != nil {
		t.Fatal(err)
	}
	dockerfile := fmt.Sprintf(`
	FROM scratch
	COPY %s %s
	FROM scratch AS second
	ARG hienv
	COPY a b
	COPY --from=0 /$hienv %s /hi2/
	`, helloPath, helloPath, testDir)
	stages, err := Parse([]byte(dockerfile))
	if err != nil {
		t.Fatal(err)
	}
	expectedDependencies := [][]string{
		{
			helloPath,
			testDir,
		},
		nil,
	}
	// The ARG value comes from the command line, not the Dockerfile.
	buildArgs := NewBuildArgs([]string{fmt.Sprintf("hienv=%s", helloPath)})
	for index := range stages {
		actualDeps, err := Dependencies(index, stages, buildArgs)
		testutil.CheckErrorAndDeepEqual(t, false, err, expectedDependencies[index], actualDeps)
	}
}

View File

@ -17,103 +17,208 @@ limitations under the License.
package executor
import (
"bytes"
"fmt"
"io/ioutil"
"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"github.com/google/go-containerregistry/v1/empty"
"github.com/google/go-containerregistry/v1/tarball"
"github.com/google/go-containerregistry/authn"
"github.com/google/go-containerregistry/name"
"github.com/google/go-containerregistry/v1"
"github.com/google/go-containerregistry/v1/mutate"
"github.com/google/go-containerregistry/v1/remote"
"github.com/GoogleContainerTools/kaniko/pkg/commands"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
"github.com/GoogleContainerTools/kaniko/pkg/image"
"github.com/GoogleContainerTools/kaniko/pkg/snapshot"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/containers/image/manifest"
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/sirupsen/logrus"
"io/ioutil"
)
func DoBuild(dockerfilePath, srcContext, destination, snapshotMode string, dockerInsecureSkipTLSVerify bool) error {
func DoBuild(dockerfilePath, srcContext, snapshotMode string, args []string) (name.Reference, v1.Image, error) {
// Parse dockerfile and unpack base image to root
d, err := ioutil.ReadFile(dockerfilePath)
if err != nil {
return err
return nil, nil, err
}
stages, err := dockerfile.Parse(d)
if err != nil {
return err
}
baseImage := stages[0].BaseName
// Unpack file system to root
logrus.Infof("Unpacking filesystem of %s...", baseImage)
if err := util.ExtractFileSystemFromImage(baseImage); err != nil {
return err
return nil, nil, err
}
dockerfile.ResolveStages(stages)
hasher, err := getHasher(snapshotMode)
if err != nil {
return err
return nil, nil, err
}
l := snapshot.NewLayeredMap(hasher)
snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
// Take initial snapshot
if err := snapshotter.Init(); err != nil {
return err
}
// Initialize source image
sourceImage, err := image.NewSourceImage(baseImage)
if err != nil {
return err
}
// Set environment variables within the image
if err := image.SetEnvVariables(sourceImage); err != nil {
return err
}
imageConfig := sourceImage.Config()
// Currently only supports single stage builds
for _, stage := range stages {
if err := resolveOnBuild(&stage, imageConfig); err != nil {
return err
for index, stage := range stages {
baseImage := stage.BaseName
finalStage := index == len(stages)-1
// Unpack file system to root
logrus.Infof("Unpacking filesystem of %s...", baseImage)
var sourceImage v1.Image
var ref name.Reference
if baseImage == constants.NoBaseImage {
logrus.Info("No base image, nothing to extract")
sourceImage = empty.Image
} else {
// Initialize source image
ref, err = name.ParseReference(baseImage, name.WeakValidation)
if err != nil {
return nil, nil, err
}
auth, err := authn.DefaultKeychain.Resolve(ref.Context().Registry)
if err != nil {
return nil, nil, err
}
sourceImage, err = remote.Image(ref, auth, http.DefaultTransport)
if err != nil {
return nil, nil, err
}
}
if err := util.GetFSFromImage(sourceImage); err != nil {
return nil, nil, err
}
l := snapshot.NewLayeredMap(hasher)
snapshotter := snapshot.NewSnapshotter(l, constants.RootDir)
// Take initial snapshot
if err := snapshotter.Init(); err != nil {
return nil, nil, err
}
imageConfig, err := sourceImage.ConfigFile()
if err != nil {
return nil, nil, err
}
if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil {
return nil, nil, err
}
buildArgs := dockerfile.NewBuildArgs(args)
for _, cmd := range stage.Commands {
dockerCommand, err := commands.GetCommand(cmd, srcContext)
if err != nil {
return err
return nil, nil, err
}
if dockerCommand == nil {
continue
}
if err := dockerCommand.ExecuteCommand(imageConfig); err != nil {
return err
if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil {
return nil, nil, err
}
if !finalStage {
continue
}
// Now, we get the files to snapshot from this command and take the snapshot
snapshotFiles := dockerCommand.FilesToSnapshot()
contents, err := snapshotter.TakeSnapshot(snapshotFiles)
if err != nil {
return err
return nil, nil, err
}
util.MoveVolumeWhitelistToWhitelist()
if contents == nil {
logrus.Info("No files were changed, appending empty layer to config.")
sourceImage.AppendConfigHistory(constants.Author, true)
continue
}
// Append the layer to the image
if err := sourceImage.AppendLayer(contents, constants.Author); err != nil {
opener := func() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader(contents)), nil
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return nil, nil, err
}
sourceImage, err = mutate.Append(sourceImage,
mutate.Addendum{
Layer: layer,
History: v1.History{
Author: constants.Author,
CreatedBy: dockerCommand.CreatedBy(),
},
},
)
if err != nil {
return nil, nil, err
}
}
if finalStage {
sourceImage, err = mutate.Config(sourceImage, imageConfig.Config)
if err != nil {
return nil, nil, err
}
return ref, sourceImage, nil
}
if err := saveStageDependencies(index, stages, buildArgs); err != nil {
return nil, nil, err
}
// Delete the filesystem
if err := util.DeleteFilesystem(); err != nil {
return nil, nil, err
}
}
return nil, nil, err
}
// DoPush pushes image to the registry repository named by destination.
// When ref (the base image's reference) is non-nil, its repository is offered
// as a cross-repo mount source so layers shared with the base image need not
// be re-uploaded.
func DoPush(ref name.Reference, image v1.Image, destination string) error {
	destRef, err := name.ParseReference(destination, name.WeakValidation)
	if err != nil {
		return err
	}
	pushAuth, err := authn.DefaultKeychain.Resolve(destRef.Context().Registry)
	if err != nil {
		return err
	}
	opts := remote.WriteOptions{}
	if ref != nil {
		opts.MountPaths = []name.Repository{ref.Context()}
	}
	return remote.Write(destRef, image, pushAuth, http.DefaultTransport, opts)
}
func saveStageDependencies(index int, stages []instructions.Stage, buildArgs *dockerfile.BuildArgs) error {
// First, get the files in this stage later stages will need
dependencies, err := dockerfile.Dependencies(index, stages, buildArgs)
logrus.Infof("saving dependencies %s", dependencies)
if err != nil {
return err
}
// Then, create the directory they will exist in
i := strconv.Itoa(index)
dependencyDir := filepath.Join(constants.KanikoDir, i)
if err := os.MkdirAll(dependencyDir, 0755); err != nil {
return err
}
// Now, copy over dependencies to this dir
for _, d := range dependencies {
fi, err := os.Lstat(d)
if err != nil {
return err
}
dest := filepath.Join(dependencyDir, d)
if fi.IsDir() {
if err := util.CopyDir(d, dest); err != nil {
return err
}
} else if fi.Mode()&os.ModeSymlink != 0 {
if err := util.CopySymlink(d, dest); err != nil {
return err
}
} else {
if err := util.CopyFile(d, dest); err != nil {
return err
}
}
}
// Push the image
if err := setDefaultEnv(); err != nil {
return err
}
return image.PushImage(sourceImage, destination, dockerInsecureSkipTLSVerify)
return nil
}
func getHasher(snapshotMode string) (func(string) (string, error), error) {
@ -127,7 +232,7 @@ func getHasher(snapshotMode string) (func(string) (string, error), error) {
return nil, fmt.Errorf("%s is not a valid snapshot mode", snapshotMode)
}
func resolveOnBuild(stage *instructions.Stage, config *manifest.Schema2Config) error {
func resolveOnBuild(stage *instructions.Stage, config *v1.Config) error {
if config.OnBuild == nil {
return nil
}
@ -141,18 +246,3 @@ func resolveOnBuild(stage *instructions.Stage, config *manifest.Schema2Config) e
logrus.Infof("Executing %v build triggers", len(cmds))
return nil
}
// setDefaultEnv sets default values for HOME and PATH so that
// config.json and docker-credential-gcr can be accessed
func setDefaultEnv() error {
defaultEnvs := map[string]string{
"HOME": "/root",
"PATH": "/usr/local/bin/",
}
for key, val := range defaultEnvs {
if err := os.Setenv(key, val); err != nil {
return err
}
}
return nil
}

View File

@ -17,80 +17,27 @@ limitations under the License.
package image
import (
"fmt"
"os"
"strings"
"github.com/GoogleContainerTools/kaniko/pkg/version"
"github.com/containers/image/types"
"github.com/google/go-containerregistry/v1"
img "github.com/GoogleContainerTools/container-diff/pkg/image"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/containers/image/copy"
"github.com/containers/image/docker"
"github.com/containers/image/signature"
"github.com/containers/image/transports/alltransports"
"github.com/sirupsen/logrus"
)
// sourceImage is the image that will be modified by the executor
// NewSourceImage initializes the source image with the base image
func NewSourceImage(srcImg string) (*img.MutableSource, error) {
if srcImg == constants.NoBaseImage {
return img.NewMutableSource(nil)
}
logrus.Infof("Initializing source image %s", srcImg)
ref, err := docker.ParseReference("//" + srcImg)
if err != nil {
return nil, err
}
return img.NewMutableSource(ref)
}
// PushImage pushes the final image
func PushImage(ms *img.MutableSource, destImg string, dockerInsecureSkipTLSVerify bool) error {
srcRef := &img.ProxyReference{
ImageReference: nil,
Src: ms,
}
destRef, err := alltransports.ParseImageName("docker://" + destImg)
if err != nil {
return err
}
policyContext, err := getPolicyContext()
if err != nil {
return err
}
logrus.Infof("Pushing image to %s", destImg)
opts := &copy.Options{
DestinationCtx: &types.SystemContext{
DockerRegistryUserAgent: fmt.Sprintf("kaniko/executor-%s", version.Version()),
DockerInsecureSkipTLSVerify: dockerInsecureSkipTLSVerify,
},
}
return copy.Image(policyContext, destRef, srcRef, opts)
}
// SetEnvVariables sets environment variables as specified in the image
func SetEnvVariables(ms *img.MutableSource) error {
envVars := ms.Env()
for key, val := range envVars {
if err := os.Setenv(key, val); err != nil {
func SetEnvVariables(img v1.Image) error {
cfg, err := img.ConfigFile()
if err != nil {
return err
}
envVars := cfg.Config.Env
for _, envVar := range envVars {
split := strings.SplitN(envVar, "=", 2)
if err := os.Setenv(split[0], split[1]); err != nil {
return err
}
logrus.Debugf("Setting environment variable %s=%s", key, val)
logrus.Infof("Setting environment variable %s", envVar)
}
return nil
}
func getPolicyContext() (*signature.PolicyContext, error) {
policyContext, err := signature.NewPolicyContext(&signature.Policy{
Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
})
if err != nil {
logrus.Debugf("Error retrieving policy context: %s", err)
return nil, err
}
return policyContext, nil
}

View File

@ -16,6 +16,11 @@ limitations under the License.
package snapshot
import (
"path/filepath"
"strings"
)
type LayeredMap struct {
layers []map[string]string
hasher func(string) (string, error)
@ -33,6 +38,21 @@ func (l *LayeredMap) Snapshot() {
l.layers = append(l.layers, map[string]string{})
}
func (l *LayeredMap) GetFlattenedPathsForWhiteOut() map[string]struct{} {
paths := map[string]struct{}{}
for _, l := range l.layers {
for p := range l {
if strings.HasPrefix(filepath.Base(p), ".wh.") {
delete(paths, p)
} else {
paths[p] = struct{}{}
}
paths[p] = struct{}{}
}
}
return paths
}
func (l *LayeredMap) Get(s string) (string, bool) {
for i := len(l.layers) - 1; i >= 0; i-- {
if v, ok := l.layers[i][s]; ok {

View File

@ -19,10 +19,8 @@ package snapshot
import (
"archive/tar"
"bytes"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/sirupsen/logrus"
"io"
"io/ioutil"
"os"
@ -33,6 +31,7 @@ import (
type Snapshotter struct {
l *LayeredMap
directory string
hardlinks map[uint64]string
}
// NewSnapshotter creates a new snapshotter rooted at d
@ -51,85 +50,125 @@ func (s *Snapshotter) Init() error {
// TakeSnapshot takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
// a tarball of the changed files. Return contents of the tarball, and whether or not any files were changed
func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) {
if files != nil {
return s.TakeSnapshotOfFiles(files)
}
logrus.Info("Taking snapshot of full filesystem...")
buf := bytes.NewBuffer([]byte{})
filesAdded, err := s.snapShotFS(buf)
if err != nil {
return nil, err
}
contents, err := ioutil.ReadAll(buf)
var filesAdded bool
var err error
if files == nil {
filesAdded, err = s.snapShotFS(buf)
} else {
filesAdded, err = s.snapshotFiles(buf, files)
}
if err != nil {
return nil, err
}
contents := buf.Bytes()
if !filesAdded {
return nil, nil
}
return contents, err
}
// TakeSnapshotOfFiles takes a snapshot of specific files
// snapshotFiles takes a snapshot of specific files
// Used for ADD/COPY commands, when we know which files have changed
func (s *Snapshotter) TakeSnapshotOfFiles(files []string) ([]byte, error) {
logrus.Infof("Taking snapshot of files %v...", files)
func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) {
s.hardlinks = map[uint64]string{}
s.l.Snapshot()
if len(files) == 0 {
logrus.Info("No files changed in this command, skipping snapshotting.")
return nil, nil
return false, nil
}
buf := bytes.NewBuffer([]byte{})
w := tar.NewWriter(buf)
defer w.Close()
filesAdded := false
logrus.Infof("Taking snapshot of files %v...", files)
snapshottedFiles := make(map[string]bool)
for _, file := range files {
info, err := os.Lstat(file)
if err != nil {
return nil, err
}
if util.PathInWhitelist(file, s.directory) {
logrus.Debugf("Not adding %s to layer, as it is whitelisted", file)
continue
}
// Only add to the tar if we add it to the layeredmap.
maybeAdd, err := s.l.MaybeAdd(file)
if err != nil {
return nil, err
}
if maybeAdd {
filesAdded = true
util.AddToTar(file, info, w)
}
parentDirs := util.ParentDirectories(file)
files = append(parentDirs, files...)
}
if !filesAdded {
return nil, nil
}
return ioutil.ReadAll(buf)
}
func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
s.l.Snapshot()
filesAdded := false
w := tar.NewWriter(f)
defer w.Close()
err := filepath.Walk(s.directory, func(path string, info os.FileInfo, err error) error {
// Now create the tar.
for _, file := range files {
file = filepath.Clean(file)
if val, ok := snapshottedFiles[file]; ok && val {
continue
}
if util.PathInWhitelist(file, s.directory) {
logrus.Debugf("Not adding %s to layer, as it's whitelisted", file)
continue
}
snapshottedFiles[file] = true
info, err := os.Lstat(file)
if err != nil {
return false, err
}
// Only add to the tar if we add it to the layeredmap.
maybeAdd, err := s.l.MaybeAdd(file)
if err != nil {
return false, err
}
if maybeAdd {
filesAdded = true
if err := util.AddToTar(file, info, s.hardlinks, w); err != nil {
return false, err
}
}
}
return filesAdded, nil
}
func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) {
logrus.Info("Taking snapshot of full filesystem...")
s.hardlinks = map[uint64]string{}
s.l.Snapshot()
existingPaths := s.l.GetFlattenedPathsForWhiteOut()
filesAdded := false
w := tar.NewWriter(f)
defer w.Close()
// Save the fs state in a map to iterate over later.
memFs := map[string]os.FileInfo{}
filepath.Walk(s.directory, func(path string, info os.FileInfo, err error) error {
memFs[path] = info
return nil
})
// First handle whiteouts
for p := range memFs {
delete(existingPaths, p)
}
for path := range existingPaths {
// Only add the whiteout if the directory for the file still exists.
dir := filepath.Dir(path)
if _, ok := memFs[dir]; ok {
logrus.Infof("Adding whiteout for %s", path)
filesAdded = true
if err := util.Whiteout(path, w); err != nil {
return false, err
}
}
}
// Now create the tar.
for path, info := range memFs {
if util.PathInWhitelist(path, s.directory) {
logrus.Debugf("Not adding %s to layer, as it's whitelisted", path)
return nil
continue
}
// Only add to the tar if we add it to the layeredmap.
maybeAdd, err := s.l.MaybeAdd(path)
if err != nil {
return err
return false, err
}
if maybeAdd {
logrus.Debugf("Adding %s to layer, because it was changed.", path)
filesAdded = true
return util.AddToTar(path, info, w)
if err := util.AddToTar(path, info, s.hardlinks, w); err != nil {
return false, err
}
}
return nil
})
return filesAdded, err
}
return filesAdded, nil
}

View File

@ -149,30 +149,23 @@ func TestSnapshotFiles(t *testing.T) {
if err != nil {
t.Fatal(err)
}
expectedContents := map[string]string{
filepath.Join(testDir, "foo"): "newbaz1",
}
expectedFiles := []string{"/tmp", filepath.Join(testDir, "foo")}
// Check contents of the snapshot, make sure contents is equivalent to snapshotFiles
reader := bytes.NewReader(contents)
tr := tar.NewReader(reader)
numFiles := 0
var actualFiles []string
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
numFiles = numFiles + 1
if _, isFile := expectedContents[hdr.Name]; !isFile {
t.Fatalf("File %s unexpectedly in tar", hdr.Name)
}
contents, _ := ioutil.ReadAll(tr)
if string(contents) != expectedContents[hdr.Name] {
t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, expectedContents[hdr.Name], string(contents))
if err != nil {
t.Fatal(err)
}
actualFiles = append(actualFiles, hdr.Name)
}
if numFiles != 1 {
t.Fatalf("%s was not added.", filepath.Join(testDir, "foo"))
}
testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles)
}
func TestEmptySnapshot(t *testing.T) {

View File

@ -20,6 +20,7 @@ import (
"github.com/docker/docker/builder/dockerfile/instructions"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/dockerfile/shell"
"github.com/google/go-containerregistry/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"net/http"
@ -178,7 +179,7 @@ func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []stri
}
if len(resolvedSources) == 1 {
fi, err := os.Stat(filepath.Join(root, resolvedSources[0]))
fi, err := os.Lstat(filepath.Join(root, resolvedSources[0]))
if err != nil {
return err
}
@ -222,3 +223,53 @@ func IsSrcRemoteFileURL(rawurl string) bool {
}
return true
}
// UpdateConfigEnv resolves variable references in newEnvs against
// replacementEnvs and merges the result into config.Env, replacing entries
// whose key already exists and appending the rest, while preserving the
// original ordering of the environment variables.
func UpdateConfigEnv(newEnvs []instructions.KeyValuePair, config *v1.Config, replacementEnvs []string) error {
	for index, pair := range newEnvs {
		expandedKey, err := ResolveEnvironmentReplacement(pair.Key, replacementEnvs, false)
		if err != nil {
			return err
		}
		expandedValue, err := ResolveEnvironmentReplacement(pair.Value, replacementEnvs, false)
		if err != nil {
			return err
		}
		newEnvs[index] = instructions.KeyValuePair{
			Key:   expandedKey,
			Value: expandedValue,
		}
	}
	// First, convert config.Env array to []instruction.KeyValuePair.
	// Split on the first "=" only: a value such as "FOO=a=b" must keep
	// everything after the first separator (a plain strings.Split dropped
	// the "=b" part and would panic on an entry with no "=" at all).
	var kvps []instructions.KeyValuePair
	for _, env := range config.Env {
		entry := strings.SplitN(env, "=", 2)
		if len(entry) != 2 {
			// Malformed entry with no "="; treat it as a key with an
			// empty value rather than indexing out of range.
			entry = append(entry, "")
		}
		kvps = append(kvps, instructions.KeyValuePair{
			Key:   entry[0],
			Value: entry[1],
		})
	}
	// Iterate through new environment variables, and replace existing keys.
	// We can't use a map because we need to preserve the order of the environment variables
Loop:
	for _, newEnv := range newEnvs {
		for index, kvp := range kvps {
			// If key exists, replace the KeyValuePair...
			if kvp.Key == newEnv.Key {
				logrus.Debugf("Replacing environment variable %v with %v in config", kvp, newEnv)
				kvps[index] = newEnv
				continue Loop
			}
		}
		// ... Else, append it as a new env variable
		kvps = append(kvps, newEnv)
	}
	// Convert back to a flat KEY=VALUE array and set it in the config.
	envArray := make([]string, 0, len(kvps))
	for _, kvp := range kvps {
		envArray = append(envArray, kvp.Key+"="+kvp.Value)
	}
	config.Env = envArray
	return nil
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package util
import (
"archive/tar"
"bufio"
"io"
"net/http"
@ -25,38 +26,196 @@ import (
"strings"
"time"
pkgutil "github.com/GoogleContainerTools/container-diff/pkg/util"
"github.com/google/go-containerregistry/v1"
"github.com/GoogleContainerTools/kaniko/pkg/constants"
"github.com/containers/image/docker"
"github.com/sirupsen/logrus"
)
var whitelist = []string{"/kaniko"}
var whitelist = []string{
"/kaniko",
// /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar
// which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist
// in the image with no way to tell if it came from the base image or not.
"/var/run",
}
var volumeWhitelist = []string{}
// ExtractFileSystemFromImage pulls an image and unpacks it to a file system at root
func ExtractFileSystemFromImage(img string) error {
func GetFSFromImage(img v1.Image) error {
whitelist, err := fileSystemWhitelist(constants.WhitelistPath)
if err != nil {
return err
}
logrus.Infof("Whitelisted directories are %s", whitelist)
if img == constants.NoBaseImage {
logrus.Info("No base image, nothing to extract")
return nil
}
ref, err := docker.ParseReference("//" + img)
logrus.Infof("Mounted directories: %v", whitelist)
layers, err := img.Layers()
if err != nil {
return err
}
imgSrc, err := ref.NewImageSource(nil)
if err != nil {
return err
fs := map[string]struct{}{}
whiteouts := map[string]struct{}{}
for i := len(layers) - 1; i >= 0; i-- {
logrus.Infof("Unpacking layer: %d", i)
l := layers[i]
r, err := l.Uncompressed()
if err != nil {
return err
}
tr := tar.NewReader(r)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
path := filepath.Join("/", filepath.Clean(hdr.Name))
base := filepath.Base(path)
dir := filepath.Dir(path)
if strings.HasPrefix(base, ".wh.") {
logrus.Infof("Whiting out %s", path)
name := strings.TrimPrefix(base, ".wh.")
whiteouts[filepath.Join(dir, name)] = struct{}{}
continue
}
if checkWhiteouts(path, whiteouts) {
logrus.Infof("Not adding %s because it is whited out", path)
continue
}
if _, ok := fs[path]; ok {
logrus.Infof("Not adding %s because it was added by a prior layer", path)
continue
}
if checkWhitelist(path, whitelist) {
logrus.Infof("Not adding %s because it is whitelisted", path)
continue
}
if hdr.Typeflag == tar.TypeSymlink {
if checkWhitelist(hdr.Linkname, whitelist) {
logrus.Debugf("skipping symlink from %s to %s because %s is whitelisted", hdr.Linkname, path, hdr.Linkname)
continue
}
}
fs[path] = struct{}{}
if err := extractFile("/", hdr, tr); err != nil {
return err
}
}
}
return pkgutil.GetFileSystemFromReference(ref, imgSrc, constants.RootDir, whitelist)
return nil
}
// DeleteFilesystem deletes the extracted image file system.
// It walks everything under constants.RootDir and removes all paths that are
// neither whitelisted nor the root itself, so the next build stage starts
// from a clean slate.
func DeleteFilesystem() error {
	logrus.Info("Deleting filesystem...")
	err := filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, err error) error {
		// Keep whitelisted paths (mounted dirs, kaniko's own files) and any
		// path that has a whitelisted entry somewhere beneath it.
		if PathInWhitelist(path, constants.RootDir) || ChildDirInWhitelist(path, constants.RootDir) {
			logrus.Debugf("Not deleting %s, as it's whitelisted", path)
			return nil
		}
		// Never delete the root directory itself.
		if path == constants.RootDir {
			return nil
		}
		// NOTE(review): RemoveAll deletes whole subtrees, after which Walk
		// still visits the now-missing children and calls this func with a
		// non-nil err that is ignored; RemoveAll on a missing path returns
		// nil, so this appears benign — confirm if Walk semantics change.
		return os.RemoveAll(path)
	})
	return err
}
// ChildDirInWhitelist returns true if there is a child file or directory of
// path in the whitelist, i.e. some whitelist entry (resolved against
// directory) lies at or beneath path.
func ChildDirInWhitelist(path, directory string) bool {
	for _, entry := range whitelist {
		whitelisted := filepath.Join(directory, entry)
		if HasFilepathPrefix(whitelisted, path) {
			return true
		}
	}
	return false
}
// unTar unpacks the tar stream read from r into the directory dest,
// delegating creation of each entry to extractFile.
func unTar(r io.Reader, dest string) error {
	reader := tar.NewReader(r)
	for {
		header, err := reader.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if err := extractFile(dest, header, reader); err != nil {
			return err
		}
	}
}
// extractFile writes a single tar entry hdr, with its contents read from tr,
// beneath the directory dest. Regular files, directories, hard links, and
// symlinks are handled; any other entry type is silently skipped.
func extractFile(dest string, hdr *tar.Header, tr io.Reader) error {
	path := filepath.Join(dest, filepath.Clean(hdr.Name))
	base := filepath.Base(path)
	dir := filepath.Dir(path)
	mode := hdr.FileInfo().Mode()
	switch hdr.Typeflag {
	case tar.TypeReg:
		logrus.Debugf("creating file %s", path)
		// It's possible a file is in the tar before it's directory.
		if _, err := os.Stat(dir); os.IsNotExist(err) {
			logrus.Debugf("base %s for file %s does not exist. Creating.", base, path)
			if err := os.MkdirAll(dir, 0755); err != nil {
				return err
			}
		}
		currFile, err := os.Create(path)
		if err != nil {
			return err
		}
		// manually set permissions on file, since the default umask (022) will interfere
		if err = os.Chmod(path, mode); err != nil {
			return err
		}
		if _, err = io.Copy(currFile, tr); err != nil {
			return err
		}
		currFile.Close()
	case tar.TypeDir:
		logrus.Debugf("creating dir %s", path)
		if err := os.MkdirAll(path, mode); err != nil {
			return err
		}
		// In some cases, MkdirAll doesn't change the permissions, so run Chmod
		if err := os.Chmod(path, mode); err != nil {
			return err
		}
	case tar.TypeLink:
		logrus.Debugf("link from %s to %s", hdr.Linkname, path)
		// The base directory for a link may not exist before it is created.
		dir := filepath.Dir(path)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return err
		}
		// NOTE(review): this creates a symlink (to the Linkname rooted at /)
		// for a tar hard-link entry; os.Link would mirror tar semantics
		// exactly — confirm whether the symlink substitution is intentional.
		if err := os.Symlink(filepath.Clean(filepath.Join("/", hdr.Linkname)), path); err != nil {
			return err
		}
	case tar.TypeSymlink:
		logrus.Debugf("symlink from %s to %s", hdr.Linkname, path)
		// The base directory for a symlink may not exist before it is created.
		dir := filepath.Dir(path)
		if err := os.MkdirAll(dir, 0755); err != nil {
			return err
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}
	}
	return nil
}
// PathInWhitelist returns true if the path is whitelisted
func PathInWhitelist(path, directory string) bool {
for _, c := range constants.KanikoBuildFiles {
if path == c {
@ -65,7 +224,30 @@ func PathInWhitelist(path, directory string) bool {
}
for _, d := range whitelist {
dirPath := filepath.Join(directory, d)
if pkgutil.HasFilepathPrefix(path, dirPath) {
if HasFilepathPrefix(path, dirPath) {
return true
}
}
return false
}
// checkWhiteouts reports whether path itself, or any ancestor directory of
// path, appears in the whiteout set.
func checkWhiteouts(path string, whiteouts map[string]struct{}) bool {
	// Exact match: the file itself was whited out.
	if _, whitedOut := whiteouts[path]; whitedOut {
		return true
	}
	// Otherwise check whether some whiteout entry is an ancestor of path.
	for dir := range whiteouts {
		if !HasFilepathPrefix(path, dir) {
			continue
		}
		logrus.Infof("Not adding %s because it's directory is whited out", path)
		return true
	}
	return false
}
func checkWhitelist(path string, whitelist []string) bool {
for _, wl := range whitelist {
if HasFilepathPrefix(path, wl) {
return true
}
}
@ -117,9 +299,15 @@ func RelativeFiles(fp string, root string) ([]string, error) {
fullPath := filepath.Join(root, fp)
logrus.Debugf("Getting files and contents at root %s", fullPath)
err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error {
if PathInWhitelist(path, root) {
return nil
}
if err != nil {
return err
}
if PathInWhitelist(path, root) {
return nil
}
relPath, err := filepath.Rel(root, path)
if err != nil {
return err
@ -135,12 +323,32 @@ func Files(root string) ([]string, error) {
var files []string
logrus.Debugf("Getting files and contents at root %s", root)
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if PathInWhitelist(path, root) {
return nil
}
files = append(files, path)
return err
})
return files, err
}
// ParentDirectories returns a list of paths to all parent directories of
// path, rooted at constants.RootDir, excluding path itself.
// Ex. /some/temp/dir -> [/some, /some/temp]
// (The original example incorrectly listed the path itself; the final
// component is deliberately skipped below.)
func ParentDirectories(path string) []string {
	path = filepath.Clean(path)
	dirs := strings.Split(path, "/")
	dirPath := constants.RootDir
	var paths []string
	for index, dir := range dirs {
		// Skip empty components (from the leading "/") and the last element,
		// which is the path itself rather than one of its parents.
		if dir == "" || index == (len(dirs)-1) {
			continue
		}
		dirPath = filepath.Join(dirPath, dir)
		paths = append(paths, dirPath)
	}
	return paths
}
// FilepathExists returns true if the path exists
func FilepathExists(path string) bool {
_, err := os.Lstat(path)
@ -219,7 +427,7 @@ func CopyDir(src, dest string) error {
}
for _, file := range files {
fullPath := filepath.Join(src, file)
fi, err := os.Stat(fullPath)
fi, err := os.Lstat(fullPath)
if err != nil {
return err
}
@ -268,3 +476,22 @@ func CopyFile(src, dest string) error {
defer srcFile.Close()
return CreateFile(dest, srcFile, fi.Mode())
}
// HasFilepathPrefix checks if the given file path begins with prefix,
// comparing whole path components so that e.g. "/foo2/bar" does not match
// the prefix "/foo".
func HasFilepathPrefix(path, prefix string) bool {
	cleanPath := filepath.Clean(path)
	cleanPrefix := filepath.Clean(prefix)
	pathParts := strings.Split(cleanPath, "/")
	prefixParts := strings.Split(cleanPrefix, "/")
	if len(pathParts) < len(prefixParts) {
		return false
	}
	for i, part := range prefixParts {
		if part != pathParts[i] {
			return false
		}
	}
	return true
}

View File

@ -17,9 +17,12 @@ limitations under the License.
package util
import (
"archive/tar"
"bytes"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
@ -47,7 +50,7 @@ func Test_fileSystemWhitelist(t *testing.T) {
}
actualWhitelist, err := fileSystemWhitelist(path)
expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys"}
expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run"}
sort.Strings(actualWhitelist)
sort.Strings(expectedWhitelist)
testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist)
@ -100,16 +103,13 @@ var tests = []struct {
files: map[string]string{
"/workspace/foo/a": "baz1",
"/workspace/foo/b": "baz2",
"/kaniko/file": "file",
},
directory: "",
expectedFiles: []string{
"workspace/foo/a",
"workspace/foo/b",
"kaniko/file",
"workspace",
"workspace/foo",
"kaniko",
".",
},
},
@ -131,3 +131,387 @@ func Test_RelativeFiles(t *testing.T) {
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedFiles, actualFiles)
}
}
// Test_ParentDirectories checks that ParentDirectories lists every ancestor
// of a path but not the path itself, and returns nil for ".".
func Test_ParentDirectories(t *testing.T) {
	cases := []struct {
		name     string
		path     string
		expected []string
	}{
		{
			name: "regular path",
			path: "/path/to/dir",
			expected: []string{
				"/path",
				"/path/to",
			},
		},
		{
			name:     "current directory",
			path:     ".",
			expected: nil,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := ParentDirectories(tc.path)
			testutil.CheckErrorAndDeepEqual(t, false, nil, tc.expected, got)
		})
	}
}
// Test_checkWhiteouts table-tests checkWhiteouts: a path is considered
// whited out when it appears in the whiteout set itself or when any of its
// ancestor directories does; siblings must not match.
func Test_checkWhiteouts(t *testing.T) {
	type args struct {
		path      string
		whiteouts map[string]struct{}
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "file whited out",
			args: args{
				path:      "/foo",
				whiteouts: map[string]struct{}{"/foo": {}},
			},
			want: true,
		},
		{
			name: "directory whited out",
			args: args{
				path:      "/foo/bar",
				whiteouts: map[string]struct{}{"/foo": {}},
			},
			want: true,
		},
		{
			name: "grandparent whited out",
			args: args{
				path:      "/foo/bar/baz",
				whiteouts: map[string]struct{}{"/foo": {}},
			},
			want: true,
		},
		{
			name: "sibling whited out",
			args: args{
				path:      "/foo/bar/baz",
				whiteouts: map[string]struct{}{"/foo/bat": {}},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := checkWhiteouts(tt.args.path, tt.args.whiteouts); got != tt.want {
				t.Errorf("checkWhiteouts() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_checkWhitelist table-tests checkWhitelist: a path is whitelisted when
// it equals a whitelist entry or lies beneath one; siblings must not match.
func Test_checkWhitelist(t *testing.T) {
	type args struct {
		path      string
		whitelist []string
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "file whitelisted",
			args: args{
				path:      "/foo",
				whitelist: []string{"/foo"},
			},
			want: true,
		},
		{
			name: "directory whitelisted",
			args: args{
				path:      "/foo/bar",
				whitelist: []string{"/foo"},
			},
			want: true,
		},
		{
			name: "grandparent whitelisted",
			args: args{
				path:      "/foo/bar/baz",
				whitelist: []string{"/foo"},
			},
			want: true,
		},
		{
			name: "sibling whitelisted",
			args: args{
				path:      "/foo/bar/baz",
				whitelist: []string{"/foo/bat"},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := checkWhitelist(tt.args.path, tt.args.whitelist); got != tt.want {
				t.Errorf("checkWhitelist() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestHasFilepathPrefix table-tests HasFilepathPrefix's component-wise
// prefix matching, including the "/foo2 vs /foo" name-prefix trap.
func TestHasFilepathPrefix(t *testing.T) {
	type args struct {
		path   string
		prefix string
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		{
			name: "parent",
			args: args{
				path:   "/foo/bar",
				prefix: "/foo",
			},
			want: true,
		},
		{
			name: "nested parent",
			args: args{
				path:   "/foo/bar/baz",
				prefix: "/foo/bar",
			},
			want: true,
		},
		{
			name: "sibling",
			args: args{
				path:   "/foo/bar",
				prefix: "/bar",
			},
			want: false,
		},
		{
			// NOTE(review): despite its name, this case duplicates the
			// "nested parent" args exactly (the prefix IS an ancestor);
			// a true nested-sibling case would expect false — confirm
			// the intended path/prefix pair.
			name: "nested sibling",
			args: args{
				path:   "/foo/bar/baz",
				prefix: "/foo/bar",
			},
			want: true,
		},
		{
			name: "name prefix",
			args: args{
				path:   "/foo2/bar",
				prefix: "/foo",
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := HasFilepathPrefix(tt.args.path, tt.args.prefix); got != tt.want {
				t.Errorf("HasFilepathPrefix() = %v, want %v", got, tt.want)
			}
		})
	}
}
// checker is a post-condition assertion run against the directory an
// archive was extracted into.
type checker func(root string, t *testing.T)

// fileExists returns a checker asserting that the file p exists under root.
func fileExists(p string) checker {
	return func(root string, t *testing.T) {
		_, err := os.Stat(filepath.Join(root, p))
		if err != nil {
			t.Fatalf("File does not exist")
		}
	}
}
// fileMatches returns a checker asserting that the file p under root has
// exactly the contents c.
func fileMatches(p string, c []byte) checker {
	return func(root string, t *testing.T) {
		actual, err := ioutil.ReadFile(filepath.Join(root, p))
		if err != nil {
			t.Fatalf("error reading file: %s", p)
		}
		if !reflect.DeepEqual(actual, c) {
			t.Errorf("file contents do not match. %v!=%v", actual, c)
		}
	}
}
// permissionsMatch returns a checker that fails the test unless the entry
// at p under root has exactly mode perms (permission and file-type bits).
func permissionsMatch(p string, perms os.FileMode) checker {
	return func(root string, t *testing.T) {
		info, err := os.Stat(filepath.Join(root, p))
		if err != nil {
			t.Fatalf("error statting file %s", p)
		}
		if mode := info.Mode(); mode != perms {
			t.Errorf("Permissions do not match. %s != %s", mode, perms)
		}
	}
}
// linkPointsTo returns a checker that fails the test unless the symlink at
// src under root resolves (textually, via Readlink) to exactly dst.
func linkPointsTo(src, dst string) checker {
	return func(root string, t *testing.T) {
		link := filepath.Join(root, src)
		target, err := os.Readlink(link)
		if err != nil {
			t.Fatalf("error reading link %s: %s", link, err)
		}
		if target != dst {
			t.Errorf("link destination does not match: %s != %s", target, dst)
		}
	}
}
func fileHeader(name string, contents string, mode int64) *tar.Header {
return &tar.Header{
Name: name,
Size: int64(len(contents)),
Mode: mode,
Typeflag: tar.TypeReg,
}
}
func linkHeader(name, linkname string) *tar.Header {
return &tar.Header{
Name: name,
Size: 0,
Typeflag: tar.TypeSymlink,
Linkname: linkname,
}
}
func hardlinkHeader(name, linkname string) *tar.Header {
return &tar.Header{
Name: name,
Size: 0,
Typeflag: tar.TypeLink,
Linkname: linkname,
}
}
func dirHeader(name string, mode int64) *tar.Header {
return &tar.Header{
Name: name,
Size: 0,
Typeflag: tar.TypeDir,
Mode: mode,
}
}
// TestExtractFile feeds sequences of tar headers to extractFile and then
// verifies the resulting tree on disk with the checker helpers.
func TestExtractFile(t *testing.T) {
	type tc struct {
		name     string
		hdrs     []*tar.Header
		contents []byte
		checkers []checker
	}

	tcs := []tc{
		{
			name:     "normal file",
			contents: []byte("helloworld"),
			hdrs:     []*tar.Header{fileHeader("./bar", "helloworld", 0644)},
			checkers: []checker{
				fileExists("/bar"),
				fileMatches("/bar", []byte("helloworld")),
				permissionsMatch("/bar", 0644),
			},
		},
		{
			name:     "normal file, directory does not exist",
			contents: []byte("helloworld"),
			hdrs:     []*tar.Header{fileHeader("./foo/bar", "helloworld", 0644)},
			checkers: []checker{
				fileExists("/foo/bar"),
				fileMatches("/foo/bar", []byte("helloworld")),
				permissionsMatch("/foo/bar", 0644),
				permissionsMatch("/foo", 0755|os.ModeDir),
			},
		},
		{
			name:     "normal file, directory is created after",
			contents: []byte("helloworld"),
			hdrs: []*tar.Header{
				fileHeader("./foo/bar", "helloworld", 0644),
				dirHeader("./foo", 0722),
			},
			checkers: []checker{
				fileExists("/foo/bar"),
				fileMatches("/foo/bar", []byte("helloworld")),
				permissionsMatch("/foo/bar", 0644),
				permissionsMatch("/foo", 0722|os.ModeDir),
			},
		},
		{
			name: "symlink",
			hdrs: []*tar.Header{linkHeader("./bar", "bar/bat")},
			checkers: []checker{
				linkPointsTo("/bar", "bar/bat"),
			},
		},
		{
			name: "symlink relative path",
			hdrs: []*tar.Header{linkHeader("./bar", "./foo/bar/baz")},
			checkers: []checker{
				linkPointsTo("/bar", "./foo/bar/baz"),
			},
		},
		{
			name: "symlink parent does not exist",
			hdrs: []*tar.Header{linkHeader("./foo/bar/baz", "../../bat")},
			checkers: []checker{
				linkPointsTo("/foo/bar/baz", "../../bat"),
			},
		},
		{
			// Renamed: this subtest previously shared its name with the
			// case above, making the two indistinguishable in test output.
			name: "symlink parent does not exist, parent dir permissions",
			hdrs: []*tar.Header{linkHeader("./foo/bar/baz", "../../bat")},
			checkers: []checker{
				linkPointsTo("/foo/bar/baz", "../../bat"),
				permissionsMatch("/foo", 0755|os.ModeDir),
				permissionsMatch("/foo/bar", 0755|os.ModeDir),
			},
		},
		{
			name: "hardlink",
			hdrs: []*tar.Header{
				fileHeader("/bin/gzip", "gzip-binary", 0751),
				hardlinkHeader("/bin/uncompress", "/bin/gzip"),
			},
			checkers: []checker{
				linkPointsTo("/bin/uncompress", "/bin/gzip"),
			},
		},
	}

	for _, tc := range tcs {
		// Capture the range variable BEFORE t.Run: the subtest bodies run
		// in parallel, so a closure over the shared loop variable would
		// observe later iterations' values. (The original shadowed tc
		// inside the closure, after the capture had already happened.)
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			root, err := ioutil.TempDir("", "")
			if err != nil {
				t.Fatal(err)
			}
			defer os.RemoveAll(root)
			for _, hdr := range tc.hdrs {
				if err := extractFile(root, hdr, bytes.NewReader(tc.contents)); err != nil {
					t.Fatal(err)
				}
			}
			for _, check := range tc.checkers {
				check(root, t)
			}
		})
	}
}

View File

@ -23,18 +23,16 @@ import (
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
pkgutil "github.com/GoogleContainerTools/container-diff/pkg/util"
"github.com/docker/docker/pkg/archive"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var hardlinks = make(map[uint64]string)
// AddToTar adds the file i to tar w at path p
func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
func AddToTar(p string, i os.FileInfo, hardlinks map[uint64]string, w *tar.Writer) error {
linkDst := ""
if i.Mode()&os.ModeSymlink != 0 {
var err error
@ -49,7 +47,7 @@ func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
}
hdr.Name = p
hardlink, linkDst := checkHardlink(p, i)
hardlink, linkDst := checkHardlink(p, hardlinks, i)
if hardlink {
hdr.Linkname = linkDst
hdr.Typeflag = tar.TypeLink
@ -72,8 +70,23 @@ func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
return nil
}
func Whiteout(p string, w *tar.Writer) error {
dir := filepath.Dir(p)
name := ".wh." + filepath.Base(p)
th := &tar.Header{
Name: filepath.Join(dir, name),
Size: 0,
}
if err := w.WriteHeader(th); err != nil {
return err
}
return nil
}
// Returns true if path is hardlink, and the link destination
func checkHardlink(p string, i os.FileInfo) (bool, string) {
func checkHardlink(p string, hardlinks map[uint64]string, i os.FileInfo) (bool, string) {
hardlink := false
linkDst := ""
if sys := i.Sys(); sys != nil {
@ -108,7 +121,7 @@ func UnpackLocalTarArchive(path, dest string) error {
return UnpackCompressedTar(path, dest)
} else if compressionLevel == archive.Bzip2 {
bzr := bzip2.NewReader(file)
return pkgutil.UnTar(bzr, dest, nil)
return unTar(bzr, dest)
}
}
if fileIsUncompressedTar(path) {
@ -117,7 +130,7 @@ func UnpackLocalTarArchive(path, dest string) error {
return err
}
defer file.Close()
return pkgutil.UnTar(file, dest, nil)
return unTar(file, dest)
}
return errors.New("path does not lead to local tar archive")
}
@ -181,5 +194,5 @@ func UnpackCompressedTar(path, dir string) error {
return err
}
defer gzr.Close()
return pkgutil.UnTar(gzr, dir, nil)
return unTar(gzr, dir)
}

View File

@ -101,7 +101,7 @@ func createTar(testdir string, writer io.Writer) error {
if err != nil {
return err
}
if err := AddToTar(filePath, fi, w); err != nil {
if err := AddToTar(filePath, fi, map[uint64]string{}, w); err != nil {
return err
}
}

View File

@ -21,7 +21,7 @@ GREEN='\033[0;32m'
RESET='\033[0m'
echo "Running go tests..."
go test -cover -v -tags "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs" -timeout 60s `go list ./... | grep -v vendor` | sed ''/PASS/s//$(printf "${GREEN}PASS${RESET}")/'' | sed ''/FAIL/s//$(printf "${RED}FAIL${RESET}")/''
go test -cover -v -timeout 60s `go list ./... | grep -v vendor` | sed ''/PASS/s//$(printf "${GREEN}PASS${RESET}")/'' | sed ''/FAIL/s//$(printf "${RED}FAIL${RESET}")/''
GO_TEST_EXIT_CODE=${PIPESTATUS[0]}
if [[ $GO_TEST_EXIT_CODE -ne 0 ]]; then
exit $GO_TEST_EXIT_CODE

View File

@ -269,6 +269,9 @@ type BucketAttrs struct {
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
CORS []CORS
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
@ -406,6 +409,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
}
acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl {
@ -470,10 +474,11 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Lifecycle: toRawLifecycle(b.Lifecycle),
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
}
}
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
// header used in preflight responses.
@ -495,14 +500,23 @@ type CORS struct {
ResponseHeaders []string
}
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
// A Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
// objects inserted into this bucket, if no encryption method is specified.
// The key's location must be the same as the bucket's.
DefaultKMSKeyName string
}
type BucketAttrsToUpdate struct {
// VersioningEnabled, if set, updates whether the bucket uses versioning.
// If set, updates whether the bucket uses versioning.
VersioningEnabled optional.Bool
// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
// If set, updates whether the bucket is a Requester Pays bucket.
RequesterPays optional.Bool
// RetentionPolicy, if set, updates the retention policy of the bucket. Using
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
// This feature is in private alpha release. It is not currently available to
@ -510,11 +524,15 @@ type BucketAttrsToUpdate struct {
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// CORS, if set, replaces the CORS configuration with a new configuration.
// When an empty slice is provided, all CORS policies are removed; when nil
// is provided, the value is ignored in the update.
// If set, replaces the CORS configuration with a new configuration.
// An empty (rather than nil) slice causes all CORS policies to be removed.
CORS []CORS
// If set, replaces the encryption configuration of the bucket. Using
// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
// configuration.
Encryption *BucketEncryption
setLabels map[string]string
deleteLabels map[string]bool
}
@ -563,6 +581,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
ForceSendFields: []string{"RequesterPays"},
}
}
if ua.Encryption != nil {
if ua.Encryption.DefaultKMSKeyName == "" {
rb.NullFields = append(rb.NullFields, "Encryption")
rb.Encryption = nil
} else {
rb.Encryption = ua.Encryption.toRawBucketEncryption()
}
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -788,6 +814,22 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
return l
}
// toRawBucketEncryption converts e to its raw (JSON API) form. A nil
// receiver maps to nil so an unset configuration is omitted from requests.
func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
	if e == nil {
		return nil
	}
	return &raw.BucketEncryption{
		DefaultKmsKeyName: e.DefaultKMSKeyName,
	}
}
// toBucketEncryption converts a raw (JSON API) encryption configuration to
// the client-facing type; nil stays nil.
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
	if e == nil {
		return nil
	}
	return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@ -869,8 +911,6 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
return resp.NextPageToken, nil
}
// TODO(jbd): Add storage.buckets.update.
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project

View File

@ -60,6 +60,15 @@ type Copier struct {
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
// any.
//
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
// (via ObjectHandle.Key) on the destination object will result in an error when
// Run is called.
DestinationKMSKeyName string
dst, src *ObjectHandle
}
@ -74,6 +83,9 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
@ -100,6 +112,9 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}

View File

@ -19,6 +19,9 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls
that fail with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff.
@ -158,10 +161,5 @@ SignedURL for details.
// TODO: Handle error.
}
fmt.Println(url)
Authentication
See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package storage // import "cloud.google.com/go/storage"

View File

@ -22,6 +22,7 @@ import (
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
@ -74,11 +75,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
return nil, err
}
req = withContext(req, ctx)
if length < 0 && offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
@ -88,39 +84,57 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var size int64 // total size of object, even if a range was requested.
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
@ -155,6 +169,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
cacheControl: res.Header.Get("Cache-Control"),
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
@ -180,15 +195,16 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
checkedCRC bool // did we check the CRC? (For tests.)
body io.ReadCloser
seen, remain, size int64
contentType string
contentEncoding string
cacheControl string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
checkedCRC bool // did we check the CRC? (For tests.)
reopen func(seen int64) (*http.Response, error)
}
// Close closes the Reader. It must be called when done reading.
@ -197,7 +213,7 @@ func (r *Reader) Close() error {
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p)
n, err := r.readWithRetry(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -217,6 +233,35 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
// readWithRetry fills p from the response body. When a read dies with an
// error that shouldRetryRead recognizes, it reopens the download at the
// current offset (r.seen bytes already consumed) and keeps reading; any
// other error is returned along with the bytes read so far.
func (r *Reader) readWithRetry(p []byte) (int, error) {
	n := 0
	for len(p[n:]) > 0 {
		m, err := r.body.Read(p[n:])
		n += m
		r.seen += int64(m)
		if !shouldRetryRead(err) {
			return n, err
		}
		// Read failed, but we will try again. Send a ranged read request that takes
		// into account the number of bytes we've already seen.
		res, err := r.reopen(r.seen)
		if err != nil {
			// reopen already retries
			return n, err
		}
		r.body.Close()
		r.body = res.Body
	}
	return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.

View File

@ -722,6 +722,14 @@ type ObjectAttrs struct {
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
@ -779,6 +787,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),

View File

@ -88,6 +88,9 @@ func (w *Writer) open() error {
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
@ -119,6 +122,9 @@ func (w *Writer) open() error {
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err

21
vendor/github.com/Azure/go-ansiterm/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Microsoft Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

188
vendor/github.com/Azure/go-ansiterm/constants.go generated vendored Normal file
View File

@ -0,0 +1,188 @@
package ansiterm
const LogEnv = "DEBUG_TERMINAL"
// ANSI constants
// References:
// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
// -- http://en.wikipedia.org/wiki/ANSI_escape_code
// -- http://vt100.net/emu/dec_ansi_parser
// -- http://vt100.net/emu/vt500_parser.svg
// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
// -- http://www.inwap.com/pdp10/ansicode.txt
const (
// ECMA-48 Set Graphics Rendition
// Note:
// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
// -- Windows does not expose the per-window cursor (i.e., caret) blink times
ANSI_SGR_RESET = 0
ANSI_SGR_BOLD = 1
ANSI_SGR_DIM = 2
_ANSI_SGR_ITALIC = 3
ANSI_SGR_UNDERLINE = 4
_ANSI_SGR_BLINKSLOW = 5
_ANSI_SGR_BLINKFAST = 6
ANSI_SGR_REVERSE = 7
_ANSI_SGR_INVISIBLE = 8
_ANSI_SGR_LINETHROUGH = 9
_ANSI_SGR_FONT_00 = 10
_ANSI_SGR_FONT_01 = 11
_ANSI_SGR_FONT_02 = 12
_ANSI_SGR_FONT_03 = 13
_ANSI_SGR_FONT_04 = 14
_ANSI_SGR_FONT_05 = 15
_ANSI_SGR_FONT_06 = 16
_ANSI_SGR_FONT_07 = 17
_ANSI_SGR_FONT_08 = 18
_ANSI_SGR_FONT_09 = 19
_ANSI_SGR_FONT_10 = 20
_ANSI_SGR_DOUBLEUNDERLINE = 21
ANSI_SGR_BOLD_DIM_OFF = 22
_ANSI_SGR_ITALIC_OFF = 23
ANSI_SGR_UNDERLINE_OFF = 24
_ANSI_SGR_BLINK_OFF = 25
_ANSI_SGR_RESERVED_00 = 26
ANSI_SGR_REVERSE_OFF = 27
_ANSI_SGR_INVISIBLE_OFF = 28
_ANSI_SGR_LINETHROUGH_OFF = 29
ANSI_SGR_FOREGROUND_BLACK = 30
ANSI_SGR_FOREGROUND_RED = 31
ANSI_SGR_FOREGROUND_GREEN = 32
ANSI_SGR_FOREGROUND_YELLOW = 33
ANSI_SGR_FOREGROUND_BLUE = 34
ANSI_SGR_FOREGROUND_MAGENTA = 35
ANSI_SGR_FOREGROUND_CYAN = 36
ANSI_SGR_FOREGROUND_WHITE = 37
_ANSI_SGR_RESERVED_01 = 38
ANSI_SGR_FOREGROUND_DEFAULT = 39
ANSI_SGR_BACKGROUND_BLACK = 40
ANSI_SGR_BACKGROUND_RED = 41
ANSI_SGR_BACKGROUND_GREEN = 42
ANSI_SGR_BACKGROUND_YELLOW = 43
ANSI_SGR_BACKGROUND_BLUE = 44
ANSI_SGR_BACKGROUND_MAGENTA = 45
ANSI_SGR_BACKGROUND_CYAN = 46
ANSI_SGR_BACKGROUND_WHITE = 47
_ANSI_SGR_RESERVED_02 = 48
ANSI_SGR_BACKGROUND_DEFAULT = 49
// 50 - 65: Unsupported
ANSI_MAX_CMD_LENGTH = 4096
MAX_INPUT_EVENTS = 128
DEFAULT_WIDTH = 80
DEFAULT_HEIGHT = 24
ANSI_BEL = 0x07
ANSI_BACKSPACE = 0x08
ANSI_TAB = 0x09
ANSI_LINE_FEED = 0x0A
ANSI_VERTICAL_TAB = 0x0B
ANSI_FORM_FEED = 0x0C
ANSI_CARRIAGE_RETURN = 0x0D
ANSI_ESCAPE_PRIMARY = 0x1B
ANSI_ESCAPE_SECONDARY = 0x5B
ANSI_OSC_STRING_ENTRY = 0x5D
ANSI_COMMAND_FIRST = 0x40
ANSI_COMMAND_LAST = 0x7E
DCS_ENTRY = 0x90
CSI_ENTRY = 0x9B
OSC_STRING = 0x9D
ANSI_PARAMETER_SEP = ";"
ANSI_CMD_G0 = '('
ANSI_CMD_G1 = ')'
ANSI_CMD_G2 = '*'
ANSI_CMD_G3 = '+'
ANSI_CMD_DECPNM = '>'
ANSI_CMD_DECPAM = '='
ANSI_CMD_OSC = ']'
ANSI_CMD_STR_TERM = '\\'
KEY_CONTROL_PARAM_2 = ";2"
KEY_CONTROL_PARAM_3 = ";3"
KEY_CONTROL_PARAM_4 = ";4"
KEY_CONTROL_PARAM_5 = ";5"
KEY_CONTROL_PARAM_6 = ";6"
KEY_CONTROL_PARAM_7 = ";7"
KEY_CONTROL_PARAM_8 = ";8"
KEY_ESC_CSI = "\x1B["
KEY_ESC_N = "\x1BN"
KEY_ESC_O = "\x1BO"
FILL_CHARACTER = ' '
)
// getByteRange returns every byte in the inclusive range [start, end].
func getByteRange(start byte, end byte) []byte {
	out := make([]byte, 0, 32)
	for b := start; b <= end; b++ {
		out = append(out, b)
	}
	return out
}
var toGroundBytes = getToGroundBytes()
var executors = getExecuteBytes()
// SPACE 20+A0 hex Always and everywhere a blank space
// Intermediate 20-2F hex !"#$%&'()*+,-./
var intermeds = getByteRange(0x20, 0x2F)
// Parameters 30-3F hex 0123456789:;<=>?
// CSI Parameters 30-39, 3B hex 0123456789;
var csiParams = getByteRange(0x30, 0x3F)
var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
var upperCase = getByteRange(0x40, 0x5F)
// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
var lowerCase = getByteRange(0x60, 0x7E)
// Alphabetics 40-7E hex (all of upper and lower case)
var alphabetics = append(upperCase, lowerCase...)
var printables = getByteRange(0x20, 0x7F)
var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
var escapeToGroundBytes = getEscapeToGroundBytes()
// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
// byte ranges below
// getEscapeToGroundBytes lists the bytes that move the parser from the
// escape state back to ground; the ranges follow
// http://www.vt100.net/emu/vt500_parser.png.
func getEscapeToGroundBytes() []byte {
	out := getByteRange(0x30, 0x4F)
	out = append(out, getByteRange(0x51, 0x57)...)
	out = append(out, 0x59, 0x5A, 0x5C)
	out = append(out, getByteRange(0x60, 0x7E)...)
	return out
}
// getExecuteBytes lists the C0 control bytes that are executed immediately
// in most states (0x00-0x17, 0x19, 0x1C-0x1F).
func getExecuteBytes() []byte {
	out := getByteRange(0x00, 0x17)
	out = append(out, 0x19)
	return append(out, getByteRange(0x1C, 0x1F)...)
}
// getToGroundBytes lists the bytes that force the parser back to the
// ground state from any state (CAN, SUB, and most C1 controls).
func getToGroundBytes() []byte {
	out := []byte{0x18, 0x1A}
	out = append(out, getByteRange(0x80, 0x8F)...)
	out = append(out, getByteRange(0x91, 0x97)...)
	out = append(out, 0x99, 0x9A, 0x9C)
	return out
}
// Delete 7F hex Always and everywhere ignored
// C1 Control 80-9F hex 32 additional control characters
// G1 Displayable A1-FE hex 94 additional displayable characters
// Special A0+FF hex Same as SPACE and DELETE

7
vendor/github.com/Azure/go-ansiterm/context.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
package ansiterm
// ansiContext accumulates state for the escape sequence currently being
// parsed: the most recently handled byte plus any collected parameter and
// intermediate bytes.
type ansiContext struct {
	currentChar byte   // last byte handed to the state machine
	paramBuffer []byte // collected parameter bytes (via collectParam)
	interBuffer []byte // collected intermediate bytes (via collectInter)
}

49
vendor/github.com/Azure/go-ansiterm/csi_entry_state.go generated vendored Normal file
View File

@ -0,0 +1,49 @@
package ansiterm
// csiEntryState is the state entered immediately after a CSI introducer
// (e.g. ESC [). It classifies the first byte of the control sequence.
type csiEntryState struct {
	baseState
}

// Handle routes byte b: an alphabetic (final) byte sends the parser to
// ground, collectable parameter/intermediate bytes move to csiParam, and
// executor bytes run in place; anything else is consumed without change.
func (csiState csiEntryState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiEntry::Handle %#x", b)
	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}
	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		return csiState.parser.csiParam, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}
	return csiState, nil
}

// Transition performs the side effects of leaving this state: dispatching
// the completed CSI when moving to ground, or collecting the current byte
// as a parameter/intermediate when moving to csiParam.
func (csiState csiEntryState) Transition(s state) error {
	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)
	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	case csiState.parser.csiParam:
		switch {
		case sliceContains(csiParams, csiState.parser.context.currentChar):
			csiState.parser.collectParam()
		case sliceContains(intermeds, csiState.parser.context.currentChar):
			csiState.parser.collectInter()
		}
	}
	return nil
}

// Enter clears the parser's per-sequence context (see parser.clear).
func (csiState csiEntryState) Enter() error {
	csiState.parser.clear()
	return nil
}

38
vendor/github.com/Azure/go-ansiterm/csi_param_state.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package ansiterm
// csiParamState collects the parameter/intermediate bytes of a CSI
// sequence until the final (alphabetic) byte arrives.
type csiParamState struct {
	baseState
}

// Handle routes byte b: an alphabetic (final) byte sends the parser to
// ground, collectable bytes are accumulated in place, and executor bytes
// run in place.
func (csiState csiParamState) Handle(b byte) (s state, e error) {
	csiState.parser.logf("CsiParam::Handle %#x", b)
	nextState, err := csiState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}
	switch {
	case sliceContains(alphabetics, b):
		return csiState.parser.ground, nil
	case sliceContains(csiCollectables, b):
		csiState.parser.collectParam()
		return csiState, nil
	case sliceContains(executors, b):
		return csiState, csiState.parser.execute()
	}
	return csiState, nil
}

// Transition dispatches the completed CSI when moving to ground.
func (csiState csiParamState) Transition(s state) error {
	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
	csiState.baseState.Transition(s)
	switch s {
	case csiState.parser.ground:
		return csiState.parser.csiDispatch()
	}
	return nil
}

View File

@ -0,0 +1,36 @@
package ansiterm
// escapeIntermediateState handles the intermediate bytes of a multi-byte
// escape sequence (e.g. ESC ( B) before its final byte.
type escapeIntermediateState struct {
	baseState
}

// Handle routes byte b: further intermediates are collected in place,
// executor bytes run in place, and a final byte sends the parser to
// ground.
func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}
	switch {
	case sliceContains(intermeds, b):
		return escState, escState.parser.collectInter()
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeIntermediateToGroundBytes, b):
		return escState.parser.ground, nil
	}
	return escState, nil
}

// Transition dispatches the completed escape sequence when moving to
// ground.
func (escState escapeIntermediateState) Transition(s state) error {
	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)
	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	}
	return nil
}

47
vendor/github.com/Azure/go-ansiterm/escape_state.go generated vendored Normal file
View File

@ -0,0 +1,47 @@
package ansiterm
// escapeState is the state entered after an ESC byte; it decides whether
// the sequence is a CSI, an OSC string, an intermediate escape, or a
// simple escape that dispatches straight back to ground.
type escapeState struct {
	baseState
}

// Handle routes byte b: '[' starts a CSI, ']' starts an OSC string,
// executor bytes run in place, ground bytes complete the escape, and
// intermediate bytes move to escapeIntermediate.
func (escState escapeState) Handle(b byte) (s state, e error) {
	escState.parser.logf("escapeState::Handle %#x", b)
	nextState, err := escState.baseState.Handle(b)
	if nextState != nil || err != nil {
		return nextState, err
	}
	switch {
	case b == ANSI_ESCAPE_SECONDARY:
		return escState.parser.csiEntry, nil
	case b == ANSI_OSC_STRING_ENTRY:
		return escState.parser.oscString, nil
	case sliceContains(executors, b):
		return escState, escState.parser.execute()
	case sliceContains(escapeToGroundBytes, b):
		return escState.parser.ground, nil
	case sliceContains(intermeds, b):
		return escState.parser.escapeIntermediate, nil
	}
	return escState, nil
}

// Transition dispatches the completed escape when moving to ground, or
// collects the current byte when moving to escapeIntermediate.
func (escState escapeState) Transition(s state) error {
	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
	escState.baseState.Transition(s)
	switch s {
	case escState.parser.ground:
		return escState.parser.escDispatch()
	case escState.parser.escapeIntermediate:
		return escState.parser.collectInter()
	}
	return nil
}

// Enter clears the parser's per-sequence context (see parser.clear).
func (escState escapeState) Enter() error {
	escState.parser.clear()
	return nil
}

90
vendor/github.com/Azure/go-ansiterm/event_handler.go generated vendored Normal file
View File

@ -0,0 +1,90 @@
package ansiterm
// AnsiEventHandler receives the decoded events produced by an AnsiParser.
// Method names follow the standard ANSI/VT100 mnemonics for the
// corresponding control functions.
type AnsiEventHandler interface {
	// Print
	Print(b byte) error

	// Execute C0 commands
	Execute(b byte) error

	// CUrsor Up
	CUU(int) error

	// CUrsor Down
	CUD(int) error

	// CUrsor Forward
	CUF(int) error

	// CUrsor Backward
	CUB(int) error

	// Cursor to Next Line
	CNL(int) error

	// Cursor to Previous Line
	CPL(int) error

	// Cursor Horizontal position Absolute
	CHA(int) error

	// Vertical line Position Absolute
	VPA(int) error

	// CUrsor Position
	CUP(int, int) error

	// Horizontal and Vertical Position (depends on PUM)
	HVP(int, int) error

	// Text Cursor Enable Mode
	DECTCEM(bool) error

	// Origin Mode
	DECOM(bool) error

	// 132 Column Mode
	DECCOLM(bool) error

	// Erase in Display
	ED(int) error

	// Erase in Line
	EL(int) error

	// Insert Line
	IL(int) error

	// Delete Line
	DL(int) error

	// Insert Character
	ICH(int) error

	// Delete Character
	DCH(int) error

	// Set Graphics Rendition
	SGR([]int) error

	// Pan Down
	SU(int) error

	// Pan Up
	SD(int) error

	// Device Attributes
	DA([]string) error

	// Set Top and Bottom Margins
	DECSTBM(int, int) error

	// Index
	IND() error

	// Reverse Index
	RI() error

	// Flush updates from previous commands
	Flush() error
}

24
vendor/github.com/Azure/go-ansiterm/ground_state.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
package ansiterm
// groundState is the default state: bytes are either printed or executed
// as C0 controls until an escape-sequence entry byte arrives.
type groundState struct {
	baseState
}
// Handle prints printable bytes and executes C0 control bytes, deferring to
// the base state for escape-sequence entry bytes.
func (gs groundState) Handle(b byte) (s state, e error) {
	gs.parser.context.currentChar = b

	if nextState, err := gs.baseState.Handle(b); nextState != nil || err != nil {
		return nextState, err
	}

	if sliceContains(printables, b) {
		return gs, gs.parser.print()
	}
	if sliceContains(executors, b) {
		return gs, gs.parser.execute()
	}

	return gs, nil
}

View File

@ -0,0 +1,31 @@
package ansiterm
// oscStringState consumes the body of an Operating System Command string
// until a terminator byte is seen.
type oscStringState struct {
	baseState
}
// Handle consumes OSC string bytes, returning to the ground state when a
// terminator (BEL or ST) is encountered.
func (oscState oscStringState) Handle(b byte) (s state, e error) {
	oscState.parser.logf("OscString::Handle %#x", b)

	if nextState, err := oscState.baseState.Handle(b); nextState != nil || err != nil {
		return nextState, err
	}

	if isOscStringTerminator(b) {
		return oscState.parser.ground, nil
	}

	return oscState, nil
}
// isOscStringTerminator reports whether b terminates an OSC string: BEL or
// the final byte (0x5C, '\') of the ST sequence.
// See http://man7.org/linux/man-pages/man4/console_codes.4.html for the
// OSC string terminators used on Linux.
func isOscStringTerminator(b byte) bool {
	return b == ANSI_BEL || b == 0x5C
}

151
vendor/github.com/Azure/go-ansiterm/parser.go generated vendored Normal file
View File

@ -0,0 +1,151 @@
package ansiterm
import (
"errors"
"log"
"os"
)
// AnsiParser is a state machine that consumes a byte stream containing ANSI
// escape sequences and forwards the decoded events to an AnsiEventHandler.
type AnsiParser struct {
	currState          state             // state currently consuming bytes
	eventHandler       AnsiEventHandler  // receiver of decoded events
	context            *ansiContext      // bytes collected for the in-flight sequence
	csiEntry           state             // the singleton instances of each state
	csiParam           state
	dcsEntry           state
	escape             state
	escapeIntermediate state
	error              state
	ground             state
	oscString          state
	stateMap           []state           // all states, for lookup by name
	logf               func(string, ...interface{}) // debug logger; never nil after CreateParser
}
// Option configures an AnsiParser during CreateParser.
type Option func(*AnsiParser)

// WithLogf installs f as the parser's debug logging function.
func WithLogf(f func(string, ...interface{})) Option {
	return func(ap *AnsiParser) {
		ap.logf = f
	}
}
// CreateParser builds an AnsiParser that delivers events to evtHandler,
// starting in the state named initialState (e.g. "Ground"). When the LogEnv
// environment variable is "1", debug output is additionally written to
// ansiParser.log in the current directory.
func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
	ap := &AnsiParser{
		eventHandler: evtHandler,
		context:      &ansiContext{},
	}
	for _, o := range opts {
		o(ap)
	}

	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
		// FIX: the original ignored os.Create's error and handed log.New a
		// nil *os.File on failure; file logging is best-effort, so only
		// install the file logger when the file was actually created.
		if logFile, err := os.Create("ansiParser.log"); err == nil {
			logger := log.New(logFile, "", log.LstdFlags)
			if ap.logf != nil {
				// Chain: keep the user-provided logger and tee to the file.
				l := ap.logf
				ap.logf = func(s string, v ...interface{}) {
					l(s, v...)
					logger.Printf(s, v...)
				}
			} else {
				ap.logf = logger.Printf
			}
		}
	}

	if ap.logf == nil {
		ap.logf = func(string, ...interface{}) {}
	}

	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
	ap.error = errorState{baseState{name: "Error", parser: ap}}
	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}

	ap.stateMap = []state{
		ap.csiEntry,
		ap.csiParam,
		ap.dcsEntry,
		ap.escape,
		ap.escapeIntermediate,
		ap.error,
		ap.ground,
		ap.oscString,
	}

	ap.currState = getState(initialState, ap.stateMap)

	ap.logf("CreateParser: parser %p", ap)
	return ap
}
// getState returns the state whose Name matches name, or nil when absent.
func getState(name string, states []state) state {
	for _, candidate := range states {
		if candidate.Name() == name {
			return candidate
		}
	}
	return nil
}
// Parse feeds every byte of bytes through the state machine. It returns the
// index of the offending byte on error; on success it returns len(bytes)
// and the result of flushing the event handler.
func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
	for i := 0; i < len(bytes); i++ {
		if err := ap.handle(bytes[i]); err != nil {
			return i, err
		}
	}
	return len(bytes), ap.eventHandler.Flush()
}
// handle records b as the current character, lets the current state consume
// it, and performs the state change the state requested (if any).
func (ap *AnsiParser) handle(b byte) error {
	ap.context.currentChar = b

	newState, err := ap.currState.Handle(b)
	if err != nil {
		return err
	}
	if newState == nil {
		ap.logf("WARNING: newState is nil")
		return errors.New("New state of 'nil' is invalid.")
	}

	// No transition requested; stay put.
	if newState == ap.currState {
		return nil
	}
	return ap.changeState(newState)
}
// changeState moves the parser from its current state to newState by running
// Exit on the old state, the old state's Transition action, then Enter on the
// new state. The first error aborts the change and leaves currState as-is.
func (ap *AnsiParser) changeState(newState state) error {
	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())

	// Exit old state
	if err := ap.currState.Exit(); err != nil {
		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
		return err
	}

	// Perform transition action
	if err := ap.currState.Transition(newState); err != nil {
		// FIX: the original passed the method value newState.Name (a func,
		// not its result) to %s, logging a function pointer instead of the
		// destination state's name.
		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
		return err
	}

	// Enter new state
	if err := newState.Enter(); err != nil {
		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
		return err
	}

	ap.currState = newState
	return nil
}

View File

@ -0,0 +1,99 @@
package ansiterm
import (
"strconv"
)
// parseParams splits a raw parameter buffer on ';' into its non-empty
// parameter strings. Empty fields (";;") are dropped and a trailing
// unterminated parameter is kept. The error result is always nil.
func parseParams(bytes []byte) ([]string, error) {
	params := []string{}
	var current []byte

	for _, b := range bytes {
		if b != ';' {
			current = append(current, b)
			continue
		}
		if len(current) > 0 {
			params = append(params, string(current))
			current = nil
		}
	}

	// The final parameter may not be ';'-terminated.
	if len(current) > 0 {
		params = append(params, string(current))
	}

	return params, nil
}
// parseCmd returns the command identifier of the sequence in context: the
// final (current) character rendered as a one-character string. The error
// result is always nil.
func parseCmd(context ansiContext) (string, error) {
	return string(context.currentChar), nil
}
// getInt returns the first numeric parameter, or dflt when params is empty
// or its first value is zero/unparseable (see getInts).
func getInt(params []string, dflt int) int {
	return getInts(params, 1, dflt)[0]
}
// getInts converts params to integers, mapping zero (and unparseable values,
// which strconv.Atoi reports as zero here) to dflt per VT100 convention, and
// pads the result with dflt up to minCount entries.
func getInts(params []string, minCount int, dflt int) []int {
	ints := []int{}

	for _, p := range params {
		n, _ := strconv.Atoi(p) // parse failures yield 0, mapped to dflt below
		if n == 0 {
			// Zero is mapped to the default value in VT100.
			n = dflt
		}
		ints = append(ints, n)
	}

	for len(ints) < minCount {
		ints = append(ints, dflt)
	}

	return ints
}
// modeDispatch routes a DEC private mode parameter to its handler callback:
// ?3 = DECCOLM (132-column), ?6 = DECOM (origin), ?25 = DECTCEM (cursor
// visibility). Unknown parameters are silently ignored.
func (ap *AnsiParser) modeDispatch(param string, set bool) error {
	switch param {
	case "?3":
		return ap.eventHandler.DECCOLM(set)
	case "?6":
		return ap.eventHandler.DECOM(set)
	case "?25":
		return ap.eventHandler.DECTCEM(set)
	default:
		return nil
	}
}
// hDispatch handles CSI ... h (set mode); only single-parameter forms are
// supported, everything else is ignored.
func (ap *AnsiParser) hDispatch(params []string) error {
	if len(params) != 1 {
		return nil
	}
	return ap.modeDispatch(params[0], true)
}
// lDispatch handles CSI ... l (reset mode); only single-parameter forms are
// supported, everything else is ignored.
func (ap *AnsiParser) lDispatch(params []string) error {
	if len(params) != 1 {
		return nil
	}
	return ap.modeDispatch(params[0], false)
}
// getEraseParam returns the erase-mode parameter (ED/EL), defaulting to 0
// and clamping any value outside the valid range 0..3 back to 0.
func getEraseParam(params []string) int {
	p := getInt(params, 0)
	if p >= 0 && p <= 3 {
		return p
	}
	return 0
}

119
vendor/github.com/Azure/go-ansiterm/parser_actions.go generated vendored Normal file
View File

@ -0,0 +1,119 @@
package ansiterm
// collectParam appends the current character to the parameter buffer of the
// in-flight sequence.
func (ap *AnsiParser) collectParam() error {
	b := ap.context.currentChar
	ap.logf("collectParam %#x", b)
	ap.context.paramBuffer = append(ap.context.paramBuffer, b)
	return nil
}
// collectInter appends the current character to the intermediate-byte buffer
// of the in-flight sequence.
//
// FIX: the original appended to interBuffer but assigned the result to
// paramBuffer, clobbering any collected parameters and never actually
// growing interBuffer.
func (ap *AnsiParser) collectInter() error {
	currChar := ap.context.currentChar
	ap.logf("collectInter %#x", currChar)
	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
	return nil
}
// escDispatch executes a completed two-byte escape command: ESC D (IND),
// ESC E (NEL, emitted as CR then LF) or ESC M (RI). Unknown commands are
// ignored.
func (ap *AnsiParser) escDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	intermeds := ap.context.interBuffer
	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
	ap.logf("escDispatch: %v(%v)", cmd, intermeds)

	switch cmd {
	case "D": // IND
		return ap.eventHandler.IND()
	case "E": // NEL, equivalent to CRLF
		if err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN); err != nil {
			return err
		}
		return ap.eventHandler.Execute(ANSI_LINE_FEED)
	case "M": // RI
		return ap.eventHandler.RI()
	}

	return nil
}
// csiDispatch decodes the completed CSI sequence held in the parser context
// and invokes the matching AnsiEventHandler callback. Missing numeric
// parameters default to 1 (or 0 for the erase commands J/K), per VT100
// conventions; unsupported commands are logged and ignored.
func (ap *AnsiParser) csiDispatch() error {
	cmd, _ := parseCmd(*ap.context)
	params, _ := parseParams(ap.context.paramBuffer)
	ap.logf("Parsed params: %v with length: %d", params, len(params))

	ap.logf("csiDispatch: %v(%v)", cmd, params)

	switch cmd {
	case "@": // ICH - insert character
		return ap.eventHandler.ICH(getInt(params, 1))
	case "A": // CUU - cursor up
		return ap.eventHandler.CUU(getInt(params, 1))
	case "B": // CUD - cursor down
		return ap.eventHandler.CUD(getInt(params, 1))
	case "C": // CUF - cursor forward
		return ap.eventHandler.CUF(getInt(params, 1))
	case "D": // CUB - cursor backward
		return ap.eventHandler.CUB(getInt(params, 1))
	case "E": // CNL - cursor next line
		return ap.eventHandler.CNL(getInt(params, 1))
	case "F": // CPL - cursor previous line
		return ap.eventHandler.CPL(getInt(params, 1))
	case "G": // CHA - cursor horizontal absolute
		return ap.eventHandler.CHA(getInt(params, 1))
	case "H": // CUP - cursor position (row, column; both default 1)
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.CUP(x, y)
	case "J": // ED - erase in display
		param := getEraseParam(params)
		return ap.eventHandler.ED(param)
	case "K": // EL - erase in line
		param := getEraseParam(params)
		return ap.eventHandler.EL(param)
	case "L": // IL - insert line
		return ap.eventHandler.IL(getInt(params, 1))
	case "M": // DL - delete line
		return ap.eventHandler.DL(getInt(params, 1))
	case "P": // DCH - delete character
		return ap.eventHandler.DCH(getInt(params, 1))
	case "S": // SU - scroll up / pan down
		return ap.eventHandler.SU(getInt(params, 1))
	case "T": // SD - scroll down / pan up
		return ap.eventHandler.SD(getInt(params, 1))
	case "c": // DA - device attributes (raw string parameters)
		return ap.eventHandler.DA(params)
	case "d": // VPA - vertical position absolute
		return ap.eventHandler.VPA(getInt(params, 1))
	case "f": // HVP - horizontal and vertical position
		ints := getInts(params, 2, 1)
		x, y := ints[0], ints[1]
		return ap.eventHandler.HVP(x, y)
	case "h": // SM - set mode
		return ap.hDispatch(params)
	case "l": // RM - reset mode
		return ap.lDispatch(params)
	case "m": // SGR - select graphic rendition (default parameter is 0)
		return ap.eventHandler.SGR(getInts(params, 1, 0))
	case "r": // DECSTBM - set scrolling region
		ints := getInts(params, 2, 1)
		top, bottom := ints[0], ints[1]
		return ap.eventHandler.DECSTBM(top, bottom)
	default:
		ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
		return nil
	}
}
// print forwards the current character to the event handler as printable text.
func (ap *AnsiParser) print() error {
	return ap.eventHandler.Print(ap.context.currentChar)
}

// clear discards any partially collected sequence by resetting the context.
func (ap *AnsiParser) clear() error {
	ap.context = &ansiContext{}
	return nil
}

// execute forwards the current character to the event handler as a C0 control.
func (ap *AnsiParser) execute() error {
	return ap.eventHandler.Execute(ap.context.currentChar)
}

71
vendor/github.com/Azure/go-ansiterm/states.go generated vendored Normal file
View File

@ -0,0 +1,71 @@
package ansiterm
// stateID is a numeric state identifier.
// NOTE(review): unused in this file — confirm before removing.
type stateID int

// state is implemented by every node of the ANSI parsing state machine.
type state interface {
	Enter() error
	Exit() error
	Handle(byte) (state, error)
	Name() string
	Transition(state) error
}

// baseState provides the default behavior shared by all concrete states.
type baseState struct {
	name   string
	parser *AnsiParser
}

// Enter is a no-op by default.
func (base baseState) Enter() error {
	return nil
}

// Exit is a no-op by default.
func (base baseState) Exit() error {
	return nil
}

// Handle processes the bytes that force a transition regardless of the
// current state (CSI/DCS/ESC/OSC entry bytes and the to-ground set).
// It returns (nil, nil) for any other byte, letting the concrete state
// decide what to do.
func (base baseState) Handle(b byte) (s state, e error) {
	switch {
	case b == CSI_ENTRY:
		return base.parser.csiEntry, nil
	case b == DCS_ENTRY:
		return base.parser.dcsEntry, nil
	case b == ANSI_ESCAPE_PRIMARY:
		return base.parser.escape, nil
	case b == OSC_STRING:
		return base.parser.oscString, nil
	case sliceContains(toGroundBytes, b):
		return base.parser.ground, nil
	}

	return nil, nil
}

// Name returns the state's human-readable name.
func (base baseState) Name() string {
	return base.name
}
// Transition executes the current character when returning to the ground
// state, provided it belongs to the set of control bytes (CAN, SUB and the
// C1 controls 0x80-0x8F, 0x91-0x97, 0x99, 0x9A) that must still be acted on.
func (base baseState) Transition(s state) error {
	if s != base.parser.ground {
		return nil
	}

	execBytes := append([]byte{0x18, 0x1A}, getByteRange(0x80, 0x8F)...)
	execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
	execBytes = append(execBytes, 0x99, 0x9A)

	if sliceContains(execBytes, base.parser.context.currentChar) {
		return base.parser.execute()
	}
	return nil
}
// dcsEntryState consumes Device Control String sequences; it relies entirely
// on the baseState defaults.
type dcsEntryState struct {
	baseState
}

// errorState is entered when the parser cannot interpret its input; it
// relies entirely on the baseState defaults.
type errorState struct {
	baseState
}

21
vendor/github.com/Azure/go-ansiterm/utilities.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package ansiterm
import (
"strconv"
)
// sliceContains reports whether b occurs in bytes.
func sliceContains(bytes []byte, b byte) bool {
	for i := range bytes {
		if bytes[i] == b {
			return true
		}
	}
	return false
}
// convertBytesToInteger parses the decimal digits in bytes, returning 0 when
// the content is not a valid integer (the Atoi error is deliberately
// discarded, matching the original behavior).
func convertBytesToInteger(bytes []byte) int {
	n, _ := strconv.Atoi(string(bytes))
	return n
}

182
vendor/github.com/Azure/go-ansiterm/winterm/ansi.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
// +build windows
package winterm
import (
"fmt"
"os"
"strconv"
"strings"
"syscall"
"github.com/Azure/go-ansiterm"
)
// Windows keyboard constants
// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
const (
	VK_PRIOR    = 0x21 // PAGE UP key
	VK_NEXT     = 0x22 // PAGE DOWN key
	VK_END      = 0x23 // END key
	VK_HOME     = 0x24 // HOME key
	VK_LEFT     = 0x25 // LEFT ARROW key
	VK_UP       = 0x26 // UP ARROW key
	VK_RIGHT    = 0x27 // RIGHT ARROW key
	VK_DOWN     = 0x28 // DOWN ARROW key
	VK_SELECT   = 0x29 // SELECT key
	VK_PRINT    = 0x2A // PRINT key
	VK_EXECUTE  = 0x2B // EXECUTE key
	VK_SNAPSHOT = 0x2C // PRINT SCREEN key
	VK_INSERT   = 0x2D // INS key
	VK_DELETE   = 0x2E // DEL key
	VK_HELP     = 0x2F // HELP key
	VK_F1       = 0x70 // F1 key
	VK_F2       = 0x71 // F2 key
	VK_F3       = 0x72 // F3 key
	VK_F4       = 0x73 // F4 key
	VK_F5       = 0x74 // F5 key
	VK_F6       = 0x75 // F6 key
	VK_F7       = 0x76 // F7 key
	VK_F8       = 0x77 // F8 key
	VK_F9       = 0x78 // F9 key
	VK_F10      = 0x79 // F10 key
	VK_F11      = 0x7A // F11 key
	VK_F12      = 0x7B // F12 key

	// Control-key state flags carried in KEY_EVENT_RECORD.ControlKeyState.
	RIGHT_ALT_PRESSED  = 0x0001
	LEFT_ALT_PRESSED   = 0x0002
	RIGHT_CTRL_PRESSED = 0x0004
	LEFT_CTRL_PRESSED  = 0x0008
	SHIFT_PRESSED      = 0x0010
	NUMLOCK_ON         = 0x0020
	SCROLLLOCK_ON      = 0x0040
	CAPSLOCK_ON        = 0x0080
	ENHANCED_KEY       = 0x0100
)

// ansiCommand is one parsed ANSI escape sequence: the raw bytes, the command
// identifier, its string parameters, and whether it is a "special"
// character-set-selection command.
type ansiCommand struct {
	CommandBytes []byte
	Command      string
	Parameters   []string
	IsSpecial    bool
}
// newAnsiCommand parses a raw escape sequence into an ansiCommand. For
// character-set-selection sequences the whole byte slice becomes the command
// and IsSpecial is set; otherwise the final byte is the command and any bytes
// between the escape prefix and the final byte are split on
// ANSI_PARAMETER_SEP into Parameters.
//
// NOTE(review): command[1] is read unconditionally, so callers must supply at
// least two bytes — confirm against call sites.
func newAnsiCommand(command []byte) *ansiCommand {
	if isCharacterSelectionCmdChar(command[1]) {
		// Is Character Set Selection commands
		return &ansiCommand{
			CommandBytes: command,
			Command:      string(command),
			IsSpecial:    true,
		}
	}

	// last char is command character
	lastCharIndex := len(command) - 1

	ac := &ansiCommand{
		CommandBytes: command,
		Command:      string(command[lastCharIndex]),
		IsSpecial:    false,
	}

	// more than a single escape
	if lastCharIndex != 0 {
		start := 1
		// skip if double char escape sequence
		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
			start++
		}
		// convert this to GetNextParam method
		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
	}

	return ac
}
// paramAsSHORT returns the parameter at index parsed as a 16-bit integer,
// or defaultValue when the index is out of range or the value is not a
// valid int16.
func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
	if index < 0 || index >= len(ac.Parameters) {
		return defaultValue
	}

	v, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
	if err != nil {
		return defaultValue
	}
	return int16(v)
}
// String renders the command for debug logging: hex bytes, the command
// character, and the quoted parameter list.
func (ac *ansiCommand) String() string {
	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
		bytesToHex(ac.CommandBytes),
		ac.Command,
		strings.Join(ac.Parameters, "\",\""))
}

// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
func isAnsiCommandChar(b byte) bool {
	switch {
	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
		return true
	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
		// non-CSI escape sequence terminator
		return true
	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
		// String escape sequence terminator
		return true
	}
	return false
}

// isXtermOscSequence reports whether the bytes collected so far form an
// unterminated xterm OSC sequence (ESC ] ... not yet ended by BEL).
func isXtermOscSequence(command []byte, current byte) bool {
	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
}

// isCharacterSelectionCmdChar reports whether b designates one of the G0-G3
// character-set selection commands.
func isCharacterSelectionCmdChar(b byte) bool {
	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
}
// bytesToHex converts a slice of bytes to a human-readable hex string.
// Note that %X does not zero-pad, so bytes below 0x10 render as one digit
// (e.g. 0x0A -> "A"), matching the original behavior.
func bytesToHex(b []byte) string {
	var sb strings.Builder
	for _, ch := range b {
		fmt.Fprintf(&sb, "%X", ch)
	}
	return sb.String()
}
// ensureInRange clamps n to the inclusive range [min, max].
func ensureInRange(n int16, min int16, max int16) int16 {
	switch {
	case n < min:
		return min
	case n > max:
		return max
	default:
		return n
	}
}
// GetStdFile returns the Go *os.File and the Windows handle for one of the
// standard streams (syscall.STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
// STD_ERROR_HANDLE). It panics on an unknown identifier or when the handle
// cannot be retrieved.
func GetStdFile(nFile int) (*os.File, uintptr) {
	var file *os.File
	switch nFile {
	case syscall.STD_INPUT_HANDLE:
		file = os.Stdin
	case syscall.STD_OUTPUT_HANDLE:
		file = os.Stdout
	case syscall.STD_ERROR_HANDLE:
		file = os.Stderr
	default:
		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
	}

	fd, err := syscall.GetStdHandle(nFile)
	if err != nil {
		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
	}

	return file, uintptr(fd)
}

327
vendor/github.com/Azure/go-ansiterm/winterm/api.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
// +build windows
package winterm
import (
"fmt"
"syscall"
"unsafe"
)
//===========================================================================================================
// IMPORTANT NOTE:
//
// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
// variables) the pointers reference *before* the API completes.
//
// As a result, in those cases, the code must hint that the variables remain in active by invoking the
// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
// require unsafe pointers.
//
// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
// the garbage collector the variables remain in use if:
//
// -- The value is not a pointer (e.g., int32, struct)
// -- The value is not referenced by the method after passing the pointer to Windows
//
// See http://golang.org/doc/go1.3.
//===========================================================================================================
// Lazily loaded kernel32.dll procedures used by the wrappers below; each
// proc resolves on first Call.
var (
	kernel32DLL = syscall.NewLazyDLL("kernel32.dll")

	getConsoleCursorInfoProc       = kernel32DLL.NewProc("GetConsoleCursorInfo")
	setConsoleCursorInfoProc       = kernel32DLL.NewProc("SetConsoleCursorInfo")
	setConsoleCursorPositionProc   = kernel32DLL.NewProc("SetConsoleCursorPosition")
	setConsoleModeProc             = kernel32DLL.NewProc("SetConsoleMode")
	getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
	setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
	scrollConsoleScreenBufferProc  = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
	setConsoleTextAttributeProc    = kernel32DLL.NewProc("SetConsoleTextAttribute")
	setConsoleWindowInfoProc       = kernel32DLL.NewProc("SetConsoleWindowInfo")
	writeConsoleOutputProc         = kernel32DLL.NewProc("WriteConsoleOutputW")
	readConsoleInputProc           = kernel32DLL.NewProc("ReadConsoleInputW")
	waitForSingleObjectProc        = kernel32DLL.NewProc("WaitForSingleObject")
)
// Windows Console constants
const (
	// Console modes
	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
	ENABLE_PROCESSED_INPUT        = 0x0001
	ENABLE_LINE_INPUT             = 0x0002
	ENABLE_ECHO_INPUT             = 0x0004
	ENABLE_WINDOW_INPUT           = 0x0008
	ENABLE_MOUSE_INPUT            = 0x0010
	ENABLE_INSERT_MODE            = 0x0020
	ENABLE_QUICK_EDIT_MODE        = 0x0040
	ENABLE_EXTENDED_FLAGS         = 0x0080
	ENABLE_AUTO_POSITION          = 0x0100
	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200

	ENABLE_PROCESSED_OUTPUT            = 0x0001
	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x0002
	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
	DISABLE_NEWLINE_AUTO_RETURN        = 0x0008
	ENABLE_LVB_GRID_WORLDWIDE          = 0x0010

	// Character attributes
	// Note:
	// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
	//    Clearing all foreground or background colors results in black; setting all creates white.
	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
	FOREGROUND_BLUE      uint16 = 0x0001
	FOREGROUND_GREEN     uint16 = 0x0002
	FOREGROUND_RED       uint16 = 0x0004
	FOREGROUND_INTENSITY uint16 = 0x0008
	FOREGROUND_MASK      uint16 = 0x000F

	BACKGROUND_BLUE      uint16 = 0x0010
	BACKGROUND_GREEN     uint16 = 0x0020
	BACKGROUND_RED       uint16 = 0x0040
	BACKGROUND_INTENSITY uint16 = 0x0080
	BACKGROUND_MASK      uint16 = 0x00F0

	COMMON_LVB_MASK          uint16 = 0xFF00
	COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
	COMMON_LVB_UNDERSCORE    uint16 = 0x8000

	// Input event types
	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
	KEY_EVENT                = 0x0001
	MOUSE_EVENT              = 0x0002
	WINDOW_BUFFER_SIZE_EVENT = 0x0004
	MENU_EVENT               = 0x0008
	FOCUS_EVENT              = 0x0010

	// WaitForSingleObject return codes
	WAIT_ABANDONED = 0x00000080
	WAIT_FAILED    = 0xFFFFFFFF
	// NOTE(review): equals 0, i.e. the Windows WAIT_OBJECT_0 value.
	WAIT_SIGNALED  = 0x0000000
	WAIT_TIMEOUT   = 0x00000102

	// WaitForSingleObject wait duration
	WAIT_INFINITE       = 0xFFFFFFFF
	WAIT_ONE_SECOND     = 1000
	WAIT_HALF_SECOND    = 500
	WAIT_QUARTER_SECOND = 250
)
// Windows API Console types
// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
type (
	// CHAR_INFO pairs a UTF-16 code unit with its display attributes.
	CHAR_INFO struct {
		UnicodeChar uint16
		Attributes  uint16
	}

	// CONSOLE_CURSOR_INFO describes cursor size (percent of cell) and visibility.
	CONSOLE_CURSOR_INFO struct {
		Size    uint32
		Visible int32
	}

	// CONSOLE_SCREEN_BUFFER_INFO describes the screen buffer, cursor and window.
	CONSOLE_SCREEN_BUFFER_INFO struct {
		Size              COORD
		CursorPosition    COORD
		Attributes        uint16
		Window            SMALL_RECT
		MaximumWindowSize COORD
	}

	// COORD is a character-cell coordinate (column X, row Y).
	COORD struct {
		X int16
		Y int16
	}

	// SMALL_RECT is an inclusive rectangle of character cells.
	SMALL_RECT struct {
		Left   int16
		Top    int16
		Right  int16
		Bottom int16
	}

	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
	INPUT_RECORD struct {
		EventType uint16
		KeyEvent  KEY_EVENT_RECORD
	}

	// KEY_EVENT_RECORD describes a single keyboard event.
	KEY_EVENT_RECORD struct {
		KeyDown         int32
		RepeatCount     uint16
		VirtualKeyCode  uint16
		VirtualScanCode uint16
		UnicodeChar     uint16
		ControlKeyState uint32
	}

	// WINDOW_BUFFER_SIZE carries the new size after a buffer-resize event.
	WINDOW_BUFFER_SIZE struct {
		Size COORD
	}
)
// boolToBOOL converts a Go bool into a Windows BOOL (int32 1 or 0).
func boolToBOOL(f bool) int32 {
	if f {
		return 1
	}
	return 0
}
// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
	return checkError(r1, r2, err)
}

// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
	return checkError(r1, r2, err)
}

// SetConsoleCursorPosition sets the location of the console cursor.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
	use(coord) // keep coord live until the syscall returns (see file header)
	return checkError(r1, r2, err)
}

// GetConsoleMode gets the console mode for given file descriptor
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
func GetConsoleMode(handle uintptr) (mode uint32, err error) {
	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
	return mode, err
}

// SetConsoleMode sets the console mode for given file descriptor
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
func SetConsoleMode(handle uintptr, mode uint32) error {
	r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
	use(mode)
	return checkError(r1, r2, err)
}

// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
	info := CONSOLE_SCREEN_BUFFER_INFO{}
	err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
	if err != nil {
		return nil, err
	}
	return &info, nil
}
// ScrollConsoleScreenBuffer moves the cells in scrollRect (clipped to
// clipRect) to destOrigin, filling vacated cells with char.
func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
	r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
	use(scrollRect)
	use(clipRect)
	use(destOrigin)
	use(char)
	return checkError(r1, r2, err)
}

// SetConsoleScreenBufferSize sets the size of the console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
	r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
	use(coord)
	return checkError(r1, r2, err)
}

// SetConsoleTextAttribute sets the attributes of characters written to the
// console screen buffer by the WriteFile or WriteConsole function.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
	r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
	use(attribute)
	return checkError(r1, r2, err)
}

// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
// Note that the size and location must be within and no larger than the backing console screen buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
	r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
	use(isAbsolute)
	use(rect)
	return checkError(r1, r2, err)
}

// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
	r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
	use(buffer)
	use(bufferSize)
	use(bufferCoord)
	return checkError(r1, r2, err)
}
// ReadConsoleInput reads (and removes) data from the console input buffer.
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
	r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
	use(buffer)
	return checkError(r1, r2, err)
}

// WaitForSingleObject waits for the passed handle to be signaled.
// It returns true if the handle was signaled; false otherwise
// (including on WAIT_ABANDONED, WAIT_TIMEOUT, or failure, where err is set).
// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
	r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
	switch r1 {
	case WAIT_ABANDONED, WAIT_TIMEOUT:
		return false, nil
	case WAIT_SIGNALED:
		return true, nil
	}
	use(msWait)
	return false, err
}

// String helpers

// String formats the buffer info for debug logging.
func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
	return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
}

// String formats the coordinate as "X,Y".
func (coord COORD) String() string {
	return fmt.Sprintf("%v,%v", coord.X, coord.Y)
}

// String formats the rectangle as "(Left,Top),(Right,Bottom)".
func (rect SMALL_RECT) String() string {
	return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
}
// checkError evaluates the results of a Windows API call: a non-zero r1
// indicates success (nil). On failure it returns err when one was provided,
// otherwise syscall.EINVAL.
func checkError(r1, r2 uintptr, err error) error {
	// Windows APIs return non-zero to indicate success
	if r1 != 0 {
		return nil
	}

	if err == nil {
		return syscall.EINVAL
	}
	return err
}
// coordToPointer converts a COORD into a uintptr (by fooling the type system).
func coordToPointer(c COORD) uintptr {
	// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
	return uintptr(*((*uint32)(unsafe.Pointer(&c))))
}

// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
func use(p interface{}) {}

View File

@ -0,0 +1,100 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
// Masks covering only the RGB color bits (excluding intensity) of the
// foreground and background attribute nibbles.
const (
	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
)
// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
// request represented by the passed ANSI mode. baseMode supplies the default
// colors restored by SGR "default" codes; the returned bool is the updated
// reverse-video (inverted) flag.
func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
	switch ansiMode {

	// Mode styles
	case ansiterm.ANSI_SGR_BOLD:
		windowsMode = windowsMode | FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
		windowsMode &^= FOREGROUND_INTENSITY

	case ansiterm.ANSI_SGR_UNDERLINE:
		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE

	case ansiterm.ANSI_SGR_REVERSE:
		inverted = true

	case ansiterm.ANSI_SGR_REVERSE_OFF:
		inverted = false

	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
		windowsMode &^= COMMON_LVB_UNDERSCORE

	// Foreground colors
	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_FOREGROUND_RED:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED

	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN

	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE

	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE

	// Background colors
	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
		// Black with no intensity
		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)

	case ansiterm.ANSI_SGR_BACKGROUND_RED:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED

	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN

	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE

	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
	}

	return windowsMode, inverted
}
// invertAttributes inverts the foreground and background colors of a Windows attributes value
func invertAttributes(windowsMode uint16) uint16 {
return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
}

View File

@ -0,0 +1,101 @@
// +build windows
package winterm
// Cursor movement axes used by moveCursor.
const (
	horizontal = iota
	vertical
)
// getCursorWindow returns the rectangle that bounds cursor movement: the
// effective scroll region when origin mode (DECOM) is active, otherwise the
// visible window. Either way the full buffer width is used.
func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
	if h.originMode {
		sr := h.effectiveSr(info.Window)
		return SMALL_RECT{
			Top:    sr.top,
			Bottom: sr.bottom,
			Left:   0,
			Right:  info.Size.X - 1,
		}
	}
	return SMALL_RECT{
		Top:    info.Window.Top,
		Bottom: info.Window.Bottom,
		Left:   0,
		Right:  info.Size.X - 1,
	}
}
// setCursorPosition sets the cursor to the specified position, clamped to
// the given window rectangle.
func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
	position.X = ensureInRange(position.X, window.Left, window.Right)
	position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
	if err := SetConsoleCursorPosition(h.fd, position); err != nil {
		return err
	}
	h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
	return nil
}
// moveCursorVertical moves the cursor param rows down (negative = up).
func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
	return h.moveCursor(vertical, param)
}
// moveCursorHorizontal moves the cursor param columns right (negative = left).
func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
	return h.moveCursor(horizontal, param)
}
// moveCursor shifts the cursor by param cells along the given axis
// (horizontal or vertical), clamped to the cursor window.
func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	pos := info.CursorPosition
	switch moveMode {
	case horizontal:
		pos.X += int16(param)
	case vertical:
		pos.Y += int16(param)
	}
	return h.setCursorPosition(pos, h.getCursorWindow(info))
}
// moveCursorLine moves the cursor param lines down (negative = up) and
// resets it to the first column.
func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	pos := info.CursorPosition
	pos.X = 0
	pos.Y += int16(param)
	return h.setCursorPosition(pos, h.getCursorWindow(info))
}
// moveCursorColumn moves the cursor to the 1-indexed column param on the
// current line.
func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	pos := info.CursorPosition
	pos.X = int16(param) - 1 // ANSI columns are 1-indexed; the buffer is 0-indexed
	return h.setCursorPosition(pos, h.getCursorWindow(info))
}

View File

@ -0,0 +1,84 @@
// +build windows
package winterm
import "github.com/Azure/go-ansiterm"
// clearRange clears the character cells between fromCoord and toCoord
// (inclusive, in reading order) using the given attributes. The range is
// cleared as up to three rectangles: a partial first line, a full-width
// middle section, and the final (possibly partial) line.
func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
	// Ignore an invalid (negative area) request
	if toCoord.Y < fromCoord.Y {
		return nil
	}
	var err error
	var coordStart = COORD{}
	var coordEnd = COORD{}
	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
	xEnd, yEnd := toCoord.X, toCoord.Y
	// Clear any partial initial line
	if xCurrent > 0 {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yCurrent
		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}
		// Continue from the start of the next full line.
		xCurrent = 0
		yCurrent += 1
	}
	// Clear intervening rectangular section
	if yCurrent < yEnd {
		coordStart.X, coordStart.Y = xCurrent, yCurrent
		coordEnd.X, coordEnd.Y = xEnd, yEnd-1
		err = h.clearRect(attributes, coordStart, coordEnd)
		if err != nil {
			return err
		}
		xCurrent = 0
		yCurrent = yEnd
	}
	// Clear remaining partial ending line
	coordStart.X, coordStart.Y = xCurrent, yCurrent
	coordEnd.X, coordEnd.Y = xEnd, yEnd
	err = h.clearRect(attributes, coordStart, coordEnd)
	if err != nil {
		return err
	}
	return nil
}
// clearRect fills the rectangle fromCoord..toCoord (inclusive) with the fill
// character using the given attributes.
func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
	width := toCoord.X - fromCoord.X + 1
	height := toCoord.Y - fromCoord.Y + 1
	// Reject empty or inverted rectangles BEFORE converting to unsigned:
	// a negative int16 width/height would wrap to an enormous uint32,
	// defeating the old `size <= 0` check (which on an unsigned value can
	// only ever mean `size == 0`) and causing a huge allocation.
	if width <= 0 || height <= 0 {
		return nil
	}
	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
	size := uint32(width) * uint32(height)
	buffer := make([]CHAR_INFO, size)
	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
	for i := range buffer {
		buffer[i] = char
	}
	return WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
}

View File

@ -0,0 +1,118 @@
// +build windows
package winterm
// effectiveSr gets the current effective scroll region in buffer coordinates
func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
if top >= bottom {
top = window.Top
bottom = window.Bottom
}
return scrollRegion{top: top, bottom: bottom}
}
// scrollUp scrolls the effective scroll region up by param lines.
func (h *windowsAnsiEventHandler) scrollUp(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	region := h.effectiveSr(info.Window)
	return h.scroll(param, region, info)
}
// scrollDown scrolls the effective scroll region down by param lines.
func (h *windowsAnsiEventHandler) scrollDown(param int) error {
	return h.scrollUp(-param)
}
// deleteLines removes param lines at the cursor row by scrolling the rest of
// the scroll region up; it is a no-op when the cursor is outside the region.
func (h *windowsAnsiEventHandler) deleteLines(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	start := info.CursorPosition.Y
	sr := h.effectiveSr(info.Window)
	// Lines cannot be inserted or deleted outside the scrolling region.
	if start < sr.top || start > sr.bottom {
		return nil
	}
	sr.top = start
	return h.scroll(param, sr, info)
}
// insertLines inserts param blank lines at the cursor row (a negative delete).
func (h *windowsAnsiEventHandler) insertLines(param int) error {
	return h.deleteLines(-param)
}
// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
// Positive param scrolls content up; vacated rows are filled with spaces in
// the current attributes.
func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
	// Copy from and clip to the scroll region (full buffer width)
	scrollRect := SMALL_RECT{
		Top:    sr.top,
		Bottom: sr.bottom,
		Left:   0,
		Right:  info.Size.X - 1,
	}
	// Origin to which area should be copied
	destOrigin := COORD{
		X: 0,
		Y: sr.top - int16(param),
	}
	// Fill character for cells exposed by the scroll.
	char := CHAR_INFO{
		UnicodeChar: ' ',
		Attributes:  h.attributes,
	}
	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
		return err
	}
	return nil
}
// deleteCharacters removes param characters at the cursor by shifting the
// remainder of the line left.
func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	return h.scrollLine(param, info.CursorPosition, info)
}
// insertCharacters inserts param blank characters at the cursor (a negative delete).
func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
	return h.deleteCharacters(-param)
}
// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
// Positive columns shifts content left (character delete); vacated cells are
// filled with spaces in the current attributes.
func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
	// Copy from and clip to the scroll region (full buffer width)
	scrollRect := SMALL_RECT{
		Top:    position.Y,
		Bottom: position.Y,
		Left:   position.X,
		Right:  info.Size.X - 1,
	}
	// Origin to which area should be copied
	destOrigin := COORD{
		X: position.X - int16(columns),
		Y: position.Y,
	}
	char := CHAR_INFO{
		UnicodeChar: ' ',
		Attributes:  h.attributes,
	}
	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
		return err
	}
	return nil
}

View File

@ -0,0 +1,9 @@
// +build windows
package winterm
// addInRange increments a value by the passed quantity while ensuring the
// result always remains within the supplied min / max range.
// (Doc comment fixed to start with the function's actual, unexported name.)
func addInRange(n int16, increment int16, min int16, max int16) int16 {
	return ensureInRange(n+increment, min, max)
}

View File

@ -0,0 +1,743 @@
// +build windows
package winterm
import (
"bytes"
"log"
"os"
"strconv"
"github.com/Azure/go-ansiterm"
)
// windowsAnsiEventHandler translates ANSI escape-sequence events into Windows
// console API calls against the console identified by fd.
type windowsAnsiEventHandler struct {
	fd             uintptr                      // Windows console handle
	file           *os.File                     // destination for buffered pass-through bytes
	infoReset      *CONSOLE_SCREEN_BUFFER_INFO  // screen state captured at creation; used by SGR reset
	sr             scrollRegion                 // scroll region set by DECSTBM (window-relative)
	buffer         bytes.Buffer                 // bytes pending until the next Flush
	attributes     uint16                       // current text attributes
	inverted       bool                         // reverse-video state accumulated by SGR
	wrapNext       bool                         // cursor is at the right margin; next print wraps
	drewMarginByte bool                         // margin byte was already rendered by Flush
	originMode     bool                         // DECOM: cursor addressing relative to scroll region
	marginByte     byte                         // byte held back at the right margin
	curInfo        *CONSOLE_SCREEN_BUFFER_INFO  // cached console info since the last Flush
	curPos         COORD                        // cached cursor position since the last Flush
	logf           func(string, ...interface{}) // debug logger; non-nil after CreateWinEventHandler
}
// Option configures a windowsAnsiEventHandler during construction.
type Option func(*windowsAnsiEventHandler)

// WithLogf sets the debug log function used by the handler.
func WithLogf(f func(string, ...interface{})) Option {
	return func(w *windowsAnsiEventHandler) {
		w.logf = f
	}
}
// CreateWinEventHandler builds an AnsiEventHandler for the console identified
// by fd. It returns nil if the console screen buffer info cannot be queried.
// When the ansiterm debug environment variable is "1", log output is also
// written to winEventHandler.log.
func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
	infoReset, err := GetConsoleScreenBufferInfo(fd)
	if err != nil {
		return nil
	}
	h := &windowsAnsiEventHandler{
		fd:         fd,
		file:       file,
		infoReset:  infoReset,
		attributes: infoReset.Attributes,
	}
	for _, o := range opts {
		o(h)
	}
	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
		// NOTE(review): the os.Create error is deliberately ignored here; on
		// failure logFile is nil and the standard logger silently drops the
		// write errors — confirm this best-effort behavior is intended.
		logFile, _ := os.Create("winEventHandler.log")
		logger := log.New(logFile, "", log.LstdFlags)
		if h.logf != nil {
			// Chain the user-supplied logger with the file logger.
			l := h.logf
			h.logf = func(s string, v ...interface{}) {
				l(s, v...)
				logger.Printf(s, v...)
			}
		} else {
			h.logf = logger.Printf
		}
	}
	if h.logf == nil {
		// Guarantee logf is callable so call sites never need nil checks.
		h.logf = func(string, ...interface{}) {}
	}
	return h
}
// scrollRegion is a vertical scroll region expressed as inclusive row bounds.
type scrollRegion struct {
	top    int16
	bottom int16
}
// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
// current cursor position and scroll region settings, in which case it returns
// true. If no special handling is necessary, then it does nothing and returns
// false.
//
// In the false case, the caller should ensure that a carriage return
// and line feed are inserted or that the text is otherwise wrapped.
func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
	if h.wrapNext {
		// Pending margin byte must be drawn before moving the cursor.
		if err := h.Flush(); err != nil {
			return false, err
		}
		h.clearWrap()
	}
	pos, info, err := h.getCurrentInfo()
	if err != nil {
		return false, err
	}
	sr := h.effectiveSr(info.Window)
	if pos.Y == sr.bottom {
		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
		// is the full window.
		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
			if includeCR {
				pos.X = 0
				h.updatePos(pos)
			}
			return false, nil
		}
		// A custom scroll region is active. Scroll the window manually to simulate
		// the LF.
		if err := h.Flush(); err != nil {
			return false, err
		}
		h.logf("Simulating LF inside scroll region")
		if err := h.scrollUp(1); err != nil {
			return false, err
		}
		if includeCR {
			pos.X = 0
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return false, err
			}
		}
		return true, nil
	} else if pos.Y < info.Window.Bottom {
		// Let Windows handle the LF.
		pos.Y++
		if includeCR {
			pos.X = 0
		}
		h.updatePos(pos)
		return false, nil
	} else {
		// The cursor is at the bottom of the screen but outside the scroll
		// region. Skip the LF.
		h.logf("Simulating LF outside scroll region")
		if includeCR {
			if err := h.Flush(); err != nil {
				return false, err
			}
			pos.X = 0
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return false, err
			}
		}
		return true, nil
	}
}
// executeLF executes a LF without a CR.
func (h *windowsAnsiEventHandler) executeLF() error {
	handled, err := h.simulateLF(false)
	if err != nil {
		return err
	}
	if !handled {
		// Windows LF will reset the cursor column position. Write the LF
		// and restore the cursor position.
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
		if pos.X != 0 {
			if err := h.Flush(); err != nil {
				return err
			}
			h.logf("Resetting cursor position for LF without CR")
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return err
			}
		}
	}
	return nil
}
// Print buffers a printable byte, emulating VT100 deferred end-of-line
// wrapping: a byte written at the last column is held back (marginByte) and
// only wrapped when the following byte arrives.
func (h *windowsAnsiEventHandler) Print(b byte) error {
	if h.wrapNext {
		// Emit the held margin byte, then wrap before printing the new byte.
		h.buffer.WriteByte(h.marginByte)
		h.clearWrap()
		if _, err := h.simulateLF(true); err != nil {
			return err
		}
	}
	pos, info, err := h.getCurrentInfo()
	if err != nil {
		return err
	}
	if pos.X == info.Size.X-1 {
		// At the last column: defer the write until the next byte decides
		// whether we wrap.
		h.wrapNext = true
		h.marginByte = b
	} else {
		pos.X++
		h.updatePos(pos)
		h.buffer.WriteByte(b)
	}
	return nil
}
// Execute handles a C0 control byte (TAB, BEL, BS, VT, FF, LF, CR); any other
// control byte is ignored.
func (h *windowsAnsiEventHandler) Execute(b byte) error {
	switch b {
	case ansiterm.ANSI_TAB:
		h.logf("Execute(TAB)")
		// Move to the next tab stop, but preserve auto-wrap if already set.
		if !h.wrapNext {
			pos, info, err := h.getCurrentInfo()
			if err != nil {
				return err
			}
			// Tab stops every 8 columns, clamped to the last column.
			pos.X = (pos.X + 8) - pos.X%8
			if pos.X >= info.Size.X {
				pos.X = info.Size.X - 1
			}
			if err := h.Flush(); err != nil {
				return err
			}
			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
				return err
			}
		}
		return nil
	case ansiterm.ANSI_BEL:
		h.buffer.WriteByte(ansiterm.ANSI_BEL)
		return nil
	case ansiterm.ANSI_BACKSPACE:
		if h.wrapNext {
			if err := h.Flush(); err != nil {
				return err
			}
			h.clearWrap()
		}
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		if pos.X > 0 {
			pos.X--
			h.updatePos(pos)
			h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
		}
		return nil
	case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
		// Treat as true LF.
		return h.executeLF()
	case ansiterm.ANSI_LINE_FEED:
		// Simulate a CR and LF for now since there is no way in go-ansiterm
		// to tell if the LF should include CR (and more things break when it's
		// missing than when it's incorrectly added).
		handled, err := h.simulateLF(true)
		if handled || err != nil {
			return err
		}
		return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
	case ansiterm.ANSI_CARRIAGE_RETURN:
		if h.wrapNext {
			if err := h.Flush(); err != nil {
				return err
			}
			h.clearWrap()
		}
		pos, _, err := h.getCurrentInfo()
		if err != nil {
			return err
		}
		if pos.X != 0 {
			pos.X = 0
			h.updatePos(pos)
			h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
		}
		return nil
	default:
		return nil
	}
}
// CUU — Cursor Up: moves the cursor up param rows.
func (h *windowsAnsiEventHandler) CUU(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorVertical(-param)
}
// CUD — Cursor Down: moves the cursor down param rows.
func (h *windowsAnsiEventHandler) CUD(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorVertical(param)
}
// CUF — Cursor Forward: moves the cursor right param columns.
func (h *windowsAnsiEventHandler) CUF(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorHorizontal(param)
}
// CUB — Cursor Backward: moves the cursor left param columns.
func (h *windowsAnsiEventHandler) CUB(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorHorizontal(-param)
}
// CNL — Cursor Next Line: moves the cursor down param rows to column 1.
func (h *windowsAnsiEventHandler) CNL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorLine(param)
}
// CPL — Cursor Previous Line: moves the cursor up param rows to column 1.
func (h *windowsAnsiEventHandler) CPL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorLine(-param)
}
// CHA — Cursor Horizontal Absolute: moves the cursor to 1-indexed column param.
func (h *windowsAnsiEventHandler) CHA(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.moveCursorColumn(param)
}
// VPA — Vertical Position Absolute: moves the cursor to 1-indexed row param
// within the cursor window, keeping the current column.
func (h *windowsAnsiEventHandler) VPA(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("VPA: [[%d]]", param)
	h.clearWrap()
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	window := h.getCursorWindow(info)
	position := info.CursorPosition
	position.Y = window.Top + int16(param) - 1
	return h.setCursorPosition(position, window)
}
// CUP — Cursor Position: moves the cursor to the 1-indexed (row, col) within
// the cursor window (scroll-region-relative when origin mode is on).
func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("CUP: [[%d %d]]", row, col)
	h.clearWrap()
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	window := h.getCursorWindow(info)
	position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
	return h.setCursorPosition(position, window)
}
// HVP — Horizontal and Vertical Position: identical to CUP.
func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("HVP: [[%d %d]]", row, col)
	h.clearWrap()
	return h.CUP(row, col)
}
// DECTCEM — Text Cursor Enable Mode: cursor visibility is not implemented on
// Windows here; the event is logged and otherwise ignored.
func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
	h.clearWrap()
	return nil
}
// DECOM — Origin Mode: toggles scroll-region-relative cursor addressing and
// homes the cursor.
func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
	h.clearWrap()
	h.originMode = enable
	return h.CUP(1, 1)
}
// DECCOLM — Column Mode: switches the console to 80 or 132 columns, clearing
// the screen and homing the cursor as the VT100 does.
func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
	h.clearWrap()
	if err := h.ED(2); err != nil {
		return err
	}
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	targetWidth := int16(80)
	if use132 {
		targetWidth = 132
	}
	// Grow the buffer first if needed: the window can never be wider than
	// the buffer.
	if info.Size.X < targetWidth {
		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
			h.logf("set buffer failed: %v", err)
			return err
		}
	}
	window := info.Window
	window.Left = 0
	window.Right = targetWidth - 1
	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
		h.logf("set window failed: %v", err)
		return err
	}
	// Shrink the buffer afterwards, once the window fits inside it.
	if info.Size.X > targetWidth {
		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
			h.logf("set buffer failed: %v", err)
			return err
		}
	}
	return SetConsoleCursorPosition(h.fd, COORD{0, 0})
}
// ED — Erase in Display: clears part or all of the screen buffer.
func (h *windowsAnsiEventHandler) ED(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("ED: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
	// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
	// [2J -- Erases the complete display. The cursor does not move.
	// Notes:
	// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	var start COORD
	var end COORD
	switch param {
	case 0:
		start = info.CursorPosition
		end = COORD{info.Size.X - 1, info.Size.Y - 1}
	case 1:
		start = COORD{0, 0}
		end = info.CursorPosition
	case 2:
		start = COORD{0, 0}
		end = COORD{info.Size.X - 1, info.Size.Y - 1}
	}
	err = h.clearRange(h.attributes, start, end)
	if err != nil {
		return err
	}
	// If the whole buffer was cleared, move the window to the top while preserving
	// the window-relative cursor position.
	if param == 2 {
		pos := info.CursorPosition
		window := info.Window
		pos.Y -= window.Top
		window.Bottom -= window.Top
		window.Top = 0
		if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
			return err
		}
		if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
			return err
		}
	}
	return nil
}
// EL — Erase in Line: clears part or all of the cursor's line.
func (h *windowsAnsiEventHandler) EL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("EL: [%v]", strconv.Itoa(param))
	h.clearWrap()
	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
	// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
	// [2K -- Erases the complete line.
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	var start COORD
	var end COORD
	switch param {
	case 0:
		start = info.CursorPosition
		end = COORD{info.Size.X, info.CursorPosition.Y}
	case 1:
		start = COORD{0, info.CursorPosition.Y}
		end = info.CursorPosition
	case 2:
		start = COORD{0, info.CursorPosition.Y}
		end = COORD{info.Size.X, info.CursorPosition.Y}
	}
	err = h.clearRange(h.attributes, start, end)
	if err != nil {
		return err
	}
	return nil
}
// IL — Insert Lines: inserts param blank lines at the cursor row.
func (h *windowsAnsiEventHandler) IL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("IL: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.insertLines(param)
}
// DL — Delete Lines: deletes param lines at the cursor row.
func (h *windowsAnsiEventHandler) DL(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DL: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.deleteLines(param)
}
// ICH — Insert Characters: inserts param blank characters at the cursor.
func (h *windowsAnsiEventHandler) ICH(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("ICH: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.insertCharacters(param)
}
// DCH — Delete Characters: deletes param characters at the cursor.
func (h *windowsAnsiEventHandler) DCH(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DCH: [%v]", strconv.Itoa(param))
	h.clearWrap()
	return h.deleteCharacters(param)
}
// SGR — Select Graphic Rendition: folds the given parameters into the current
// Windows text attributes and applies them to the console. An empty parameter
// list, or parameter 0, restores the attributes captured at creation.
func (h *windowsAnsiEventHandler) SGR(params []int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	// Avoid shadowing the name of the strings package for the log slice.
	formatted := make([]string, 0, len(params))
	for _, v := range params {
		formatted = append(formatted, strconv.Itoa(v))
	}
	h.logf("SGR: [%v]", formatted)
	if len(params) == 0 {
		h.attributes = h.infoReset.Attributes
		h.inverted = false
	} else {
		for _, attr := range params {
			if attr == ansiterm.ANSI_SGR_RESET {
				h.attributes = h.infoReset.Attributes
				h.inverted = false
				continue
			}
			h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
		}
	}
	attributes := h.attributes
	if h.inverted {
		attributes = invertAttributes(attributes)
	}
	return SetConsoleTextAttribute(h.fd, attributes)
}
// SU — Scroll Up: scrolls the scroll region up param lines.
func (h *windowsAnsiEventHandler) SU(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("SU: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.scrollUp(param)
}
// SD — Scroll Down: scrolls the scroll region down param lines.
func (h *windowsAnsiEventHandler) SD(param int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("SD: [%v]", []string{strconv.Itoa(param)})
	h.clearWrap()
	return h.scrollDown(param)
}
// DA — Device Attributes query; intentionally a no-op.
func (h *windowsAnsiEventHandler) DA(params []string) error {
	h.logf("DA: [%v]", params)
	// DA cannot be implemented because it must send data on the VT100 input stream,
	// which is not available to go-ansiterm.
	return nil
}
// DECSTBM — Set Top and Bottom Margins: records the scroll region and homes
// the cursor.
func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("DECSTBM: [%d, %d]", top, bottom)
	// ANSI margins are 1-indexed; store 0-indexed rows for the Windows APIs.
	h.sr.top = int16(top - 1)
	h.sr.bottom = int16(bottom - 1)
	// This command also moves the cursor to the origin.
	h.clearWrap()
	return h.CUP(1, 1)
}
// RI — Reverse Index: moves the cursor up one line, scrolling the region down
// when the cursor is already at the top of the scroll region.
func (h *windowsAnsiEventHandler) RI() error {
	if err := h.Flush(); err != nil {
		return err
	}
	h.logf("RI: []")
	h.clearWrap()
	info, err := GetConsoleScreenBufferInfo(h.fd)
	if err != nil {
		return err
	}
	sr := h.effectiveSr(info.Window)
	if info.CursorPosition.Y == sr.top {
		return h.scrollDown(1)
	}
	return h.moveCursorVertical(-1)
}
// IND — Index: moves the cursor down one line (a LF without CR).
func (h *windowsAnsiEventHandler) IND() error {
	h.logf("IND: []")
	return h.executeLF()
}
// Flush writes all buffered bytes to the underlying file and invalidates the
// cached console info. If a margin byte is pending it is rendered in place via
// WriteConsoleOutput so the cursor does not advance (preserving deferred wrap).
func (h *windowsAnsiEventHandler) Flush() error {
	// Cached position/info may be stale after the write; force a re-query.
	h.curInfo = nil
	if h.buffer.Len() > 0 {
		h.logf("Flush: [%s]", h.buffer.Bytes())
		if _, err := h.buffer.WriteTo(h.file); err != nil {
			return err
		}
	}
	if h.wrapNext && !h.drewMarginByte {
		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
		info, err := GetConsoleScreenBufferInfo(h.fd)
		if err != nil {
			return err
		}
		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
		size := COORD{1, 1}
		position := COORD{0, 0}
		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
			return err
		}
		h.drewMarginByte = true
	}
	return nil
}
// getCurrentInfo ensures that the current console screen information has been
// queried since the last call to Flush(). It must be called before accessing
// h.curInfo or h.curPos. (Doc comment fixed: it previously named a
// nonexistent cacheConsoleInfo function.)
func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
	if h.curInfo == nil {
		info, err := GetConsoleScreenBufferInfo(h.fd)
		if err != nil {
			return COORD{}, nil, err
		}
		h.curInfo = info
		h.curPos = info.CursorPosition
	}
	return h.curPos, h.curInfo, nil
}
// updatePos records the new cursor position in the cache. It is a programming
// error (panic) to call it before getCurrentInfo has populated the cache.
func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
	if h.curInfo == nil {
		panic("failed to call getCurrentInfo before calling updatePos")
	}
	h.curPos = pos
}
// clearWrap clears the state where the cursor is in the margin
// waiting for the next character before wrapping the line. This must
// be done before most operations that act on the cursor.
func (h *windowsAnsiEventHandler) clearWrap() {
	h.wrapNext = false
	h.drewMarginByte = false
}

View File

@ -1,37 +0,0 @@
/*
Copyright 2018 Google, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"fmt"
"github.com/spf13/cobra"
"os"
)
// quiet, when enabled via the --quiet flag, suppresses output to stderr.
var quiet bool
// PrintToStdErr prints the formatted message to stderr unless the quiet flag
// is enabled.
func PrintToStdErr(output string, vars ...interface{}) {
	if quiet {
		return
	}
	fmt.Fprintf(os.Stderr, output, vars...)
}
// AddFlags adds the quiet flag to cmd to suppress output to stderr.
func AddFlags(cmd *cobra.Command) {
	cmd.Flags().BoolVarP(&quiet, "quiet", "q", false, "Suppress output to stderr.")
}

View File

@ -1,262 +0,0 @@
/*
Copyright 2018 Google, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import (
"bytes"
"compress/gzip"
"encoding/json"
"github.com/containers/image/docker"
img "github.com/containers/image/image"
"github.com/containers/image/types"
"io"
"io/ioutil"
"strings"
"time"
"github.com/containers/image/manifest"
digest "github.com/opencontainers/go-digest"
)
// MutableSource wraps an image source so that layers and config changes can be
// appended in memory before the image is copied elsewhere.
type MutableSource struct {
	ProxySource
	mfst       *manifest.Schema2      // working copy of the image manifest
	cfg        *manifest.Schema2Image // working copy of the image config
	extraBlobs map[string][]byte      // digest -> blob bytes added in memory
	extraLayers []digest.Digest       // digests of layers appended via AppendLayer
}
// NewMutableSource creates a MutableSource backed by the given image
// reference; a nil reference yields an empty, from-scratch source.
func NewMutableSource(r types.ImageReference) (*MutableSource, error) {
	if r == nil {
		return MutableSourceFromScratch()
	}
	src, err := r.NewImageSource(nil)
	if err != nil {
		return nil, err
	}
	img, err := r.NewImage(nil)
	if err != nil {
		return nil, err
	}
	ms := &MutableSource{
		ProxySource: ProxySource{
			Ref:         r,
			ImageSource: src,
			img:         img,
		},
		extraBlobs: make(map[string][]byte),
	}
	// Load the manifest and config up front so later mutations have state to work on.
	if err := ms.populateManifestAndConfig(); err != nil {
		return nil, err
	}
	return ms, nil
}
// MutableSourceFromScratch creates a MutableSource with an empty config and
// manifest, anchored to the special "scratch" docker reference.
func MutableSourceFromScratch() (*MutableSource, error) {
	config := &manifest.Schema2Image{
		Schema2V1Image: manifest.Schema2V1Image{
			Config: &manifest.Schema2Config{},
		},
		RootFS:  &manifest.Schema2RootFS{},
		History: []manifest.Schema2History{},
	}
	ref, err := docker.ParseReference("//scratch")
	if err != nil {
		return nil, err
	}
	src, err := ref.NewImageSource(nil)
	if err != nil {
		return nil, err
	}
	ms := &MutableSource{
		ProxySource: ProxySource{
			Ref: &ProxyReference{
				ImageReference: ref,
			},
			ImageSource: src,
		},
		extraBlobs: make(map[string][]byte),
		cfg:        config,
		mfst: &manifest.Schema2{
			SchemaVersion: 2,
			MediaType:     manifest.DockerV2Schema2MediaType,
		},
	}
	return ms, nil
}
// GetManifest saves the current config (so the manifest's config descriptor is
// up to date) and marshals the stored manifest to bytes, returning the
// Docker v2 schema 2 media type. The digest argument is ignored.
func (m *MutableSource) GetManifest(_ *digest.Digest) ([]byte, string, error) {
	if err := m.saveConfig(); err != nil {
		return nil, "", err
	}
	s, err := json.Marshal(m.mfst)
	if err != nil {
		return nil, "", err
	}
	return s, manifest.DockerV2Schema2MediaType, err
}
// populateManifestAndConfig parses the raw manifest and configs, storing them on the struct.
// If the source returns a manifest list, the linux/amd64 instance is selected.
func (m *MutableSource) populateManifestAndConfig() error {
	context := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "amd64",
	}
	image, err := m.ProxySource.Ref.NewImage(context)
	if err != nil {
		return err
	}
	defer image.Close()
	// First get manifest
	mfstBytes, mfstType, err := image.Manifest()
	if err != nil {
		return err
	}
	if mfstType == manifest.DockerV2ListMediaType {
		// We need to select a manifest digest from the manifest list
		unparsedImage := img.UnparsedInstance(m.ImageSource, nil)
		mfstDigest, err := img.ChooseManifestInstanceFromManifestList(context, unparsedImage)
		if err != nil {
			return err
		}
		mfstBytes, _, err = m.ProxySource.GetManifest(&mfstDigest)
		if err != nil {
			return err
		}
	}
	m.mfst, err = manifest.Schema2FromManifest(mfstBytes)
	if err != nil {
		return err
	}
	// Now, get config
	configBlob, err := image.ConfigBlob()
	if err != nil {
		return err
	}
	return json.Unmarshal(configBlob, &m.cfg)
}
// GetBlob first checks the stored "extra" blobs, then proxies the call to the original source.
func (m *MutableSource) GetBlob(bi types.BlobInfo) (io.ReadCloser, int64, error) {
	if b, ok := m.extraBlobs[bi.Digest.String()]; ok {
		return ioutil.NopCloser(bytes.NewReader(b)), int64(len(b)), nil
	}
	return m.ImageSource.GetBlob(bi)
}
// gzipBytes gzip-compresses b and returns the compressed bytes.
func gzipBytes(b []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(b); err != nil {
		w.Close()
		return nil, err
	}
	// Close must be checked: it flushes the remaining compressed data and
	// writes the gzip footer, so ignoring its error (as the old code did)
	// could yield a truncated stream.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// AppendLayer appends an uncompressed blob to the image, preserving the invariants required across the config and manifest:
// the manifest records the compressed layer digest while the config records
// the uncompressed diff ID, and a history entry is added.
func (m *MutableSource) AppendLayer(content []byte, author string) error {
	compressedBlob, err := gzipBytes(content)
	if err != nil {
		return err
	}
	dgst := digest.FromBytes(compressedBlob)
	// Add the layer to the manifest.
	descriptor := manifest.Schema2Descriptor{
		MediaType: manifest.DockerV2Schema2LayerMediaType,
		Size:      int64(len(content)),
		Digest:    dgst,
	}
	m.mfst.LayersDescriptors = append(m.mfst.LayersDescriptors, descriptor)
	m.extraBlobs[dgst.String()] = compressedBlob
	m.extraLayers = append(m.extraLayers, dgst)
	// Also add it to the config.
	diffID := digest.FromBytes(content)
	m.cfg.RootFS.DiffIDs = append(m.cfg.RootFS.DiffIDs, diffID)
	m.AppendConfigHistory(author, false)
	return nil
}
// saveConfig marshals the stored image config, and updates the references to it in the manifest.
// The marshaled config is registered as an extra blob so GetBlob can serve it.
func (m *MutableSource) saveConfig() error {
	cfgBlob, err := json.Marshal(m.cfg)
	if err != nil {
		return err
	}
	cfgDigest := digest.FromBytes(cfgBlob)
	m.extraBlobs[cfgDigest.String()] = cfgBlob
	m.mfst.ConfigDescriptor = manifest.Schema2Descriptor{
		MediaType: manifest.DockerV2Schema2ConfigMediaType,
		Size:      int64(len(cfgBlob)),
		Digest:    cfgDigest,
	}
	return nil
}
// Env returns a map of environment variables stored in the image config.
// Each entry has the form KEY=VALUE and is split on the FIRST '=' only.
func (m *MutableSource) Env() map[string]string {
	envArray := m.cfg.Schema2V1Image.Config.Env
	envMap := make(map[string]string, len(envArray))
	for _, env := range envArray {
		// SplitN keeps '=' characters inside the value intact (e.g.
		// LESSOPEN=| /usr/bin/lesspipe %s); plain Split would truncate them.
		entry := strings.SplitN(env, "=", 2)
		if len(entry) != 2 {
			// Skip malformed entries instead of panicking on entry[1],
			// which the old code did for an entry without '='.
			continue
		}
		envMap[entry[0]] = entry[1]
	}
	return envMap
}
// SetEnv takes a map of environment variables, and converts them to an array of strings
// in the form KEY=VALUE, and then sets the image config.
// Note: map iteration order is random, so the resulting array order is
// nondeterministic across calls.
func (m *MutableSource) SetEnv(envMap map[string]string, author string) {
	envArray := []string{}
	for key, value := range envMap {
		entry := key + "=" + value
		envArray = append(envArray, entry)
	}
	m.cfg.Schema2V1Image.Config.Env = envArray
	m.AppendConfigHistory(author, true)
}
// Config returns the mutable image config.
func (m *MutableSource) Config() *manifest.Schema2Config {
	return m.cfg.Schema2V1Image.Config
}
// SetConfig replaces the image config and records a history entry.
func (m *MutableSource) SetConfig(config *manifest.Schema2Config, author string, emptyLayer bool) {
	m.cfg.Schema2V1Image.Config = config
	m.AppendConfigHistory(author, emptyLayer)
}
// AppendConfigHistory records a new history entry, timestamped now, in the
// image config.
func (m *MutableSource) AppendConfigHistory(author string, emptyLayer bool) {
	history := manifest.Schema2History{
		Created:    time.Now(),
		Author:     author,
		EmptyLayer: emptyLayer,
	}
	m.cfg.History = append(m.cfg.History, history)
}

Some files were not shown because too many files have changed in this diff Show More