chore(deps): bump github.com/google/go-containerregistry (#2508)
Bumps [github.com/google/go-containerregistry](https://github.com/google/go-containerregistry) from 0.14.0 to 0.15.1.
- [Release notes](https://github.com/google/go-containerregistry/releases)
- [Changelog](https://github.com/google/go-containerregistry/blob/main/.goreleaser.yml)
- [Commits](https://github.com/google/go-containerregistry/compare/v0.14.0...v0.15.1)

---
updated-dependencies:
- dependency-name: github.com/google/go-containerregistry
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent 8469f7f105
commit 5792a72413
go.mod (22 changes)
@@ -14,7 +14,7 @@ require (
 	github.com/go-git/go-git/v5 v5.4.2
 	github.com/golang/mock v1.6.0
 	github.com/google/go-cmp v0.5.9
-	github.com/google/go-containerregistry v0.14.0
+	github.com/google/go-containerregistry v0.15.1
 	github.com/google/go-github v17.0.0+incompatible
 	github.com/google/slowjam v1.0.1
 	github.com/karrick/godirwalk v1.16.1
@@ -24,7 +24,7 @@ require (
 	github.com/pkg/errors v0.9.1
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/afero v1.9.5
-	github.com/spf13/cobra v1.6.1
+	github.com/spf13/cobra v1.7.0
 	github.com/spf13/pflag v1.0.5
 	golang.org/x/net v0.9.0
 	golang.org/x/oauth2 v0.7.0
@@ -35,13 +35,13 @@ require github.com/containerd/containerd v1.7.0
 
 require (
 	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
-	golang.org/x/mod v0.9.0 // indirect
-	golang.org/x/tools v0.7.0 // indirect
+	golang.org/x/mod v0.10.0 // indirect
+	golang.org/x/tools v0.8.0 // indirect
 )
 
 require (
 	cloud.google.com/go v0.110.0 // indirect
-	cloud.google.com/go/compute v1.19.0 // indirect
+	cloud.google.com/go/compute v1.19.1 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v0.13.0 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
@@ -54,7 +54,7 @@ require (
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/Microsoft/go-winio v0.6.0 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f // indirect
 	github.com/acomagu/bufpipe v1.0.3 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
@@ -78,7 +78,7 @@ require (
 	github.com/containerd/typeurl v1.0.2 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
-	github.com/docker/cli v23.0.1+incompatible // indirect
+	github.com/docker/cli v23.0.5+incompatible // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
@@ -104,7 +104,7 @@ require (
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kevinburke/ssh_config v1.1.0 // indirect
-	github.com/klauspost/compress v1.16.0 // indirect
+	github.com/klauspost/compress v1.16.5 // indirect
 	github.com/mattn/go-ieproxy v0.0.2 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -115,10 +115,10 @@ require (
 	github.com/moby/sys/sequential v0.5.0 // indirect
 	github.com/moby/sys/signal v0.7.0 // indirect
 	github.com/moby/sys/symlink v0.2.0 // indirect
-	github.com/moby/term v0.0.0-20221120202655-abb19827d345 // indirect
+	github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
 	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
 	github.com/opencontainers/runc v1.1.5 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0-rc.1 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
@@ -130,7 +130,7 @@ require (
 	github.com/rootless-containers/rootlesskit v1.1.0 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa // indirect
-	github.com/vbatts/tar-split v0.11.2 // indirect
+	github.com/vbatts/tar-split v0.11.3 // indirect
 	github.com/xanzy/ssh-agent v0.3.1 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
 	go.opencensus.io v0.24.0 // indirect
go.sum (42 changes)
@@ -181,8 +181,9 @@ cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARy
 cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
 cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
 cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
-cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
 cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
 cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
 cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
@@ -751,8 +752,9 @@ github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOp
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
-github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
 github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -1165,8 +1167,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@@ -1203,8 +1206,8 @@ github.com/docker/cli v20.10.17+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hH
 github.com/docker/cli v20.10.20+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v23.0.0-rc.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
-github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM=
-github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.5+incompatible h1:ufWmAOuD3Vmr7JP2G5K3cyuNC4YZWiAsuDEvFVVDafE=
+github.com/docker/cli v23.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -1527,8 +1530,8 @@ github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod
 github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
 github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
-github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw=
-github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk=
+github.com/google/go-containerregistry v0.15.1 h1:RsJ9NbfxYWF8Wl4VmvkpN3zYATwuvlPq2j20zmcs63E=
+github.com/google/go-containerregistry v0.15.1/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
 github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
@@ -1795,8 +1798,9 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
 github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
-github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
 github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
+github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@@ -1959,8 +1963,9 @@ github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/f
 github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
 github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
 github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
-github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
 github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA=
+github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -2052,8 +2057,9 @@ github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
 github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
 github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
-github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
+github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
+github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -2301,8 +2307,9 @@ github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t6
 github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
 github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
 github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
 github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -2399,8 +2406,9 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
 github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
 github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
 github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
 github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
+github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
+github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
 github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
 github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
 github.com/veraison/go-cose v1.0.0-rc.1/go.mod h1:7ziE85vSq4ScFTg6wyoMXjucIGOf4JkFEZi/an96Ct4=
@@ -2709,8 +2717,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
-golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -3029,6 +3037,7 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -3187,8 +3196,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
 golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
 golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
-golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
+golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -3516,7 +3525,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
 google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.19.0"
+const Version = "1.19.1"
@@ -8,12 +8,8 @@ linters:
     - containedctx # struct contains a context
     - dupl # duplicate code
     - errname # errors are named correctly
-    - goconst # strings that should be constants
-    - godot # comments end in a period
-    - misspell
     - nolintlint # "//nolint" directives are properly explained
     - revive # golint replacement
-    - stylecheck # golint replacement, less configurable than revive
     - unconvert # unnecessary conversions
     - wastedassign
 
@@ -23,10 +19,7 @@ linters:
    - exhaustive # check exhaustiveness of enum switch statements
     - gofmt # files are gofmt'ed
     - gosec # security
-    - nestif # deeply nested ifs
     - nilerr # returns nil even with non-nil error
-    - prealloc # slices that can be pre-allocated
-    - structcheck # unused struct fields
     - unparam # unused function params
 
 issues:
@@ -42,6 +35,18 @@ issues:
       text: "^line-length-limit: "
       source: "^//(go:generate|sys) "
 
+    #TODO: remove after upgrading to go1.18
+    # ignore comment spacing for nolint and sys directives
+    - linters:
+        - revive
+      text: "^comment-spacings: no space between comment delimiter and comment text"
+      source: "//(cspell:|nolint:|sys |todo)"
+
+    # not on go 1.18 yet, so no any
+    - linters:
+        - revive
+      text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+
     # allow unjustified ignores of error checks in defer statements
     - linters:
         - nolintlint
@@ -56,6 +61,8 @@ issues:
 
 
 linters-settings:
+  exhaustive:
+    default-signifies-exhaustive: true
   govet:
     enable-all: true
     disable:
@@ -98,6 +105,8 @@ linters-settings:
         disabled: true
       - name: flag-parameter # excessive, and a common idiom we use
         disabled: true
+      - name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
+        disabled: true
       # general config
      - name: line-length-limit
         arguments:
@@ -138,7 +147,3 @@ linters-settings:
         - VPCI
         - WCOW
         - WIM
-  stylecheck:
-    checks:
-      - "all"
-      - "-ST1003" # use revive's var naming
@@ -23,7 +23,7 @@ import (
 const afHVSock = 34 // AF_HYPERV
 
 // Well known Service and VM IDs
-//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
+// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
 
 // HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
 func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
@@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
 }
 
 // HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
-func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
+func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
 	return guid.GUID{
 		Data1: 0xffffffff,
 		Data2: 0xffff,
@@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
 	var addrbuf [addrlen * 2]byte
 
 	var bytes uint32
-	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
+	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
 	if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
 		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
 	}
@@ -0,0 +1,2 @@
+// This package contains Win32 filesystem functionality.
+package fs
@@ -0,0 +1,202 @@
+//go:build windows
+
+package fs
+
+import (
+	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/stringbuffer"
+)
+
+//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
+
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
+//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
+
+const NullHandle windows.Handle = 0
+
+// AccessMask defines standard, specific, and generic rights.
+//
+// Bitmask:
+//	 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+//	 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+//	+---------------+---------------+-------------------------------+
+//	|G|G|G|G|Resvd|A| StandardRights|         SpecificRights        |
+//	|R|W|E|A|     |S|               |                               |
+//	+-+-------------+---------------+-------------------------------+
+//
+//	GR     Generic Read
+//	GW     Generic Write
+//	GE     Generic Execute
+//	GA     Generic All
+//	Resvd  Reserved
+//	AS     Access Security System
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
+//
+// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
+//
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
+type AccessMask = windows.ACCESS_MASK
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// Not actually any.
+	//
+	// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
+	// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
+	FILE_ANY_ACCESS AccessMask = 0
+
+	// Specific Object Access
+	// from ntioapi.h
+
+	FILE_READ_DATA      AccessMask = (0x0001) // file & pipe
+	FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
+
+	FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
+	FILE_ADD_FILE   AccessMask = (0x0002) // directory
+
+	FILE_APPEND_DATA          AccessMask = (0x0004) // file
+	FILE_ADD_SUBDIRECTORY     AccessMask = (0x0004) // directory
+	FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
+
+	FILE_READ_EA         AccessMask = (0x0008) // file & directory
+	FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
+
+	FILE_WRITE_EA         AccessMask = (0x0010) // file & directory
+	FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
+
+	FILE_EXECUTE  AccessMask = (0x0020) // file
+	FILE_TRAVERSE AccessMask = (0x0020) // directory
+
+	FILE_DELETE_CHILD AccessMask = (0x0040) // directory
+
+	FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
+
+	FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
+
+	FILE_ALL_ACCESS      AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
+	FILE_GENERIC_READ    AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
+	FILE_GENERIC_WRITE   AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
+	FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
+
+	SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
+
+	// Standard Access
+	// from ntseapi.h
+
+	DELETE       AccessMask = 0x0001_0000
+	READ_CONTROL AccessMask = 0x0002_0000
+	WRITE_DAC    AccessMask = 0x0004_0000
+	WRITE_OWNER  AccessMask = 0x0008_0000
+	SYNCHRONIZE  AccessMask = 0x0010_0000
+
+	STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
+
+	STANDARD_RIGHTS_READ    AccessMask = READ_CONTROL
+	STANDARD_RIGHTS_WRITE   AccessMask = READ_CONTROL
+	STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
+
+	STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
+)
+
+type FileShareMode uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	FILE_SHARE_NONE        FileShareMode = 0x00
+	FILE_SHARE_READ        FileShareMode = 0x01
+	FILE_SHARE_WRITE       FileShareMode = 0x02
+	FILE_SHARE_DELETE      FileShareMode = 0x04
+	FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
+)
+
+type FileCreationDisposition uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	// from winbase.h
+
+	CREATE_NEW        FileCreationDisposition = 0x01
+	CREATE_ALWAYS     FileCreationDisposition = 0x02
+	OPEN_EXISTING     FileCreationDisposition = 0x03
+	OPEN_ALWAYS       FileCreationDisposition = 0x04
+	TRUNCATE_EXISTING FileCreationDisposition = 0x05
+)
+
+// CreateFile and co. take flags or attributes together as one parameter.
+// Define alias until we can use generics to allow both
+
+// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
+type FileFlagOrAttribute uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const ( // from winnt.h
+	FILE_FLAG_WRITE_THROUGH       FileFlagOrAttribute = 0x8000_0000
+	FILE_FLAG_OVERLAPPED          FileFlagOrAttribute = 0x4000_0000
+	FILE_FLAG_NO_BUFFERING        FileFlagOrAttribute = 0x2000_0000
+	FILE_FLAG_RANDOM_ACCESS       FileFlagOrAttribute = 0x1000_0000
+	FILE_FLAG_SEQUENTIAL_SCAN     FileFlagOrAttribute = 0x0800_0000
+	FILE_FLAG_DELETE_ON_CLOSE     FileFlagOrAttribute = 0x0400_0000
+	FILE_FLAG_BACKUP_SEMANTICS    FileFlagOrAttribute = 0x0200_0000
+	FILE_FLAG_POSIX_SEMANTICS     FileFlagOrAttribute = 0x0100_0000
+	FILE_FLAG_OPEN_REPARSE_POINT  FileFlagOrAttribute = 0x0020_0000
+	FILE_FLAG_OPEN_NO_RECALL      FileFlagOrAttribute = 0x0010_0000
+	FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
+)
+
+type FileSQSFlag = FileFlagOrAttribute
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const ( // from winbase.h
+	SECURITY_ANONYMOUS      FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
+	SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
+	SECURITY_IMPERSONATION  FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
+	SECURITY_DELEGATION     FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
+
+	SECURITY_SQOS_PRESENT     FileSQSFlag = 0x00100000
+	SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
+)
+
+// GetFinalPathNameByHandle flags
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
+type GetFinalPathFlag uint32
+
+//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
+const (
+	GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
+
+	FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
+	FILE_NAME_OPENED     GetFinalPathFlag = 0x8
+
+	VOLUME_NAME_DOS  GetFinalPathFlag = 0x0
+	VOLUME_NAME_GUID GetFinalPathFlag = 0x1
+	VOLUME_NAME_NT   GetFinalPathFlag = 0x2
+	VOLUME_NAME_NONE GetFinalPathFlag = 0x4
+)
+
+// GetFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
+// with the given handle and flags. It transparently takes care of creating a buffer of the
+// correct size for the call.
+//
+// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
+func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
+	b := stringbuffer.NewWString()
+	//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
+	for {
+		n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
+		if err != nil {
+			return "", err
+		}
+		// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
+		// Resize and try again.
+		if n > b.Cap() {
+			b.ResizeTo(n)
+			continue
+		}
+		// If the buffer is large enough, n will be the size not including the null terminator.
+		// Convert to a Go string and return.
+		return b.String(), nil
+	}
+}
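For reviewers, a minimal sketch of how the new typed helpers fit together. This is an assumption-laden illustration, not part of the commit: `internal/fs` is only importable from within go-winio itself, the code is Windows-only, and the notepad.exe path is just a convenient target.

```go
//go:build windows

// Hypothetical caller; internal/fs is importable only from inside go-winio.
package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/internal/fs"
	"golang.org/x/sys/windows"
)

func main() {
	// Open an existing file using the typed constants defined above.
	h, err := fs.CreateFile(
		`C:\Windows\notepad.exe`, // illustrative path
		fs.FILE_READ_ATTRIBUTES,  // access
		fs.FILE_SHARE_READ,       // share mode
		nil,                      // security attributes
		fs.OPEN_EXISTING,
		fs.FILE_FLAG_BACKUP_SEMANTICS,
		fs.NullHandle, // no template file
	)
	if err != nil {
		fmt.Println("CreateFile:", err)
		return
	}
	defer windows.CloseHandle(h) //nolint:errcheck

	// Resolve the handle back to a normalized DOS path; the helper grows its
	// buffer automatically if the pooled 310 UTF-16 units are not enough.
	path, err := fs.GetFinalPathNameByHandle(h, fs.FILE_NAME_NORMALIZED|fs.VOLUME_NAME_DOS)
	if err != nil {
		fmt.Println("GetFinalPathNameByHandle:", err)
		return
	}
	fmt.Println(path)
}
```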
@@ -0,0 +1,12 @@
+package fs
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
+type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`
+
+// Impersonation levels
+const (
+	SecurityAnonymous      SecurityImpersonationLevel = 0
+	SecurityIdentification SecurityImpersonationLevel = 1
+	SecurityImpersonation  SecurityImpersonationLevel = 2
+	SecurityDelegation     SecurityImpersonationLevel = 3
+)
vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go (64 changes, generated, vendored, new file)
@@ -0,0 +1,64 @@
+//go:build windows
+
+// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
+
+package fs
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values see on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procCreateFileW = modkernel32.NewProc("CreateFileW")
+)
+
+func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
+}
+
+func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
+	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+	handle = windows.Handle(r0)
+	if handle == windows.InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
@@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error {
 			(*byte)(unsafe.Pointer(&f.addr)),
 			uint32(unsafe.Sizeof(f.addr)),
 			&n,
-			nil, //overlapped
-			0,   //completionRoutine
+			nil, // overlapped
+			0,   // completionRoutine
 		)
 	})
 	return f.err
vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go (132 changes, generated, vendored, new file)
@@ -0,0 +1,132 @@
+package stringbuffer
+
+import (
+	"sync"
+	"unicode/utf16"
+)
+
+// TODO: worth exporting and using in mkwinsyscall?
+
+// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
+// large path strings:
+// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
+const MinWStringCap = 310
+
+// use *[]uint16 since []uint16 creates an extra allocation where the slice header
+// is copied to heap and then referenced via pointer in the interface header that sync.Pool
+// stores.
+var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
+	New: func() interface{} {
+		b := make([]uint16, MinWStringCap)
+		return &b
+	},
+}
+
+func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
+
+// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
+// This avoids taking a pointer to the slice header in WString, which can be set to nil.
+func freeBuffer(b []uint16) { pathPool.Put(&b) }
+
+// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
+// for interacting with Win32 APIs.
+// Sizes are specified as uint32 and not int.
+//
+// It is not thread safe.
+type WString struct {
+	// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
+
+	// raw buffer
+	b []uint16
+}
+
+// NewWString returns a [WString] allocated from a shared pool with an
+// initial capacity of at least [MinWStringCap].
+// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
+//
+// The buffer should be freed via [WString.Free]
+func NewWString() *WString {
+	return &WString{
+		b: newBuffer(),
+	}
+}
+
+func (b *WString) Free() {
+	if b.empty() {
+		return
+	}
+	freeBuffer(b.b)
+	b.b = nil
+}
+
+// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
+// previous buffer back into pool.
+func (b *WString) ResizeTo(c uint32) uint32 {
+	// already sufficient (or n is 0)
+	if c <= b.Cap() {
+		return b.Cap()
+	}
+
+	if c <= MinWStringCap {
+		c = MinWStringCap
+	}
+	// allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
+	if c <= 2*b.Cap() {
+		c = 2 * b.Cap()
+	}
+
+	b2 := make([]uint16, c)
+	if !b.empty() {
+		copy(b2, b.b)
+		freeBuffer(b.b)
+	}
+	b.b = b2
+	return c
+}
+
+// Buffer returns the underlying []uint16 buffer.
+func (b *WString) Buffer() []uint16 {
+	if b.empty() {
+		return nil
+	}
+	return b.b
+}
+
+// Pointer returns a pointer to the first uint16 in the buffer.
+// If the [WString.Free] has already been called, the pointer will be nil.
+func (b *WString) Pointer() *uint16 {
+	if b.empty() {
+		return nil
+	}
+	return &b.b[0]
+}
+
+// String returns the UTF-8 encoding of the UTF-16 string in the buffer.
+//
+// It assumes that the data is null-terminated.
+func (b *WString) String() string {
+	// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
+	// and would make this code Windows-only, which makes no sense.
+	// So copy UTF16ToString code into here.
+	// If other windows-specific code is added, switch to [windows.UTF16ToString]
+
+	s := b.b
+	for i, v := range s {
+		if v == 0 {
+			s = s[:i]
+			break
+		}
+	}
+	return string(utf16.Decode(s))
+}
+
+// Cap returns the underlying buffer capacity.
+func (b *WString) Cap() uint32 {
+	if b.empty() {
+		return 0
+	}
+	return b.cap()
+}
+
+func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
+func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
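A sketch of the WString round-trip, under the assumption that we are inside go-winio where the internal stringbuffer package is importable; the hand-written NUL terminator stands in for what a Win32 call would normally write through b.Pointer().

```go
// Hypothetical caller; internal/stringbuffer is importable only within go-winio.
package main

import (
	"fmt"
	"unicode/utf16"

	"github.com/Microsoft/go-winio/internal/stringbuffer"
)

func main() {
	b := stringbuffer.NewWString()
	defer b.Free() // hand the buffer back to the shared pool

	// A Win32 call would fill the buffer via b.Pointer() and b.Cap();
	// here we write a NUL-terminated UTF-16 string by hand.
	copy(b.Buffer(), utf16.Encode([]rune("hello\x00")))

	fmt.Println(b.String(), b.Cap()) // hello 310
}
```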
@@ -16,11 +16,12 @@ import (
 	"unsafe"
 
 	"golang.org/x/sys/windows"
+
+	"github.com/Microsoft/go-winio/internal/fs"
 )
 
 //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
 //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
-//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
 //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
 //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
 //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
@@ -163,19 +164,21 @@ func (s pipeAddress) String() string {
 }
 
 // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
-func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
+func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
 	for {
 		select {
 		case <-ctx.Done():
 			return syscall.Handle(0), ctx.Err()
 		default:
-			h, err := createFile(*path,
+			wh, err := fs.CreateFile(*path,
 				access,
-				0,
-				nil,
-				syscall.OPEN_EXISTING,
-				windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
-				0)
+				0,   // mode
+				nil, // security attributes
+				fs.OPEN_EXISTING,
+				fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
+				0, // template file handle
+			)
+			h := syscall.Handle(wh)
 			if err == nil {
 				return h, nil
 			}
@@ -219,7 +222,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
 func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
 	var err error
 	var h syscall.Handle
-	h, err = tryDialPipe(ctx, &path, access)
+	h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
 	if err != nil {
 		return nil, err
 	}
@@ -279,6 +282,7 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
 	}
 	defer localFree(ntPath.Buffer)
 	oa.ObjectName = &ntPath
+	oa.Attributes = windows.OBJ_CASE_INSENSITIVE
 
 	// The security descriptor is only needed for the first pipe.
 	if first {
@@ -63,7 +63,6 @@ var (
 	procBackupWrite            = modkernel32.NewProc("BackupWrite")
 	procCancelIoEx             = modkernel32.NewProc("CancelIoEx")
 	procConnectNamedPipe       = modkernel32.NewProc("ConnectNamedPipe")
-	procCreateFileW            = modkernel32.NewProc("CreateFileW")
 	procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
 	procCreateNamedPipeW       = modkernel32.NewProc("CreateNamedPipeW")
 	procGetCurrentThread       = modkernel32.NewProc("GetCurrentThread")
@@ -305,24 +304,6 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
 	return
 }
 
-func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(name)
-	if err != nil {
-		return
-	}
-	return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
-}
-
-func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
-	r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
-	handle = syscall.Handle(r0)
-	if handle == syscall.InvalidHandle {
-		err = errnoErr(e1)
-	}
-	return
-}
-
 func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
 	r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
 	newport = syscall.Handle(r0)
@@ -18,6 +18,7 @@ import (
 	"os"
 	"path/filepath"
 	"sync"
+	"time"
 
 	"github.com/docker/cli/cli/config"
 	"github.com/docker/cli/cli/config/configfile"
@@ -52,7 +53,7 @@ type defaultKeychain struct {
 
 var (
 	// DefaultKeychain implements Keychain by interpreting the docker config file.
-	DefaultKeychain Keychain = &defaultKeychain{}
+	DefaultKeychain = RefreshingKeychain(&defaultKeychain{}, 5*time.Minute)
 )
 
 const (
@@ -178,3 +179,71 @@ func (w wrapper) Resolve(r Resource) (Authenticator, error) {
 	}
 	return FromConfig(AuthConfig{Username: u, Password: p}), nil
 }
+
+func RefreshingKeychain(inner Keychain, duration time.Duration) Keychain {
+	return &refreshingKeychain{
+		keychain: inner,
+		duration: duration,
+	}
+}
+
+type refreshingKeychain struct {
+	keychain Keychain
+	duration time.Duration
+	clock    func() time.Time
+}
+
+func (r *refreshingKeychain) Resolve(target Resource) (Authenticator, error) {
+	last := time.Now()
+	auth, err := r.keychain.Resolve(target)
+	if err != nil || auth == Anonymous {
+		return auth, err
+	}
+	return &refreshing{
+		target:   target,
+		keychain: r.keychain,
+		last:     last,
+		cached:   auth,
+		duration: r.duration,
+		clock:    r.clock,
+	}, nil
+}
+
+type refreshing struct {
+	sync.Mutex
+	target   Resource
+	keychain Keychain
+
+	duration time.Duration
+
+	last   time.Time
+	cached Authenticator
+
+	// for testing
+	clock func() time.Time
+}
+
+func (r *refreshing) Authorization() (*AuthConfig, error) {
+	r.Lock()
+	defer r.Unlock()
+	if r.cached == nil || r.expired() {
+		r.last = r.now()
+		auth, err := r.keychain.Resolve(r.target)
+		if err != nil {
+			return nil, err
+		}
+		r.cached = auth
+	}
+	return r.cached.Authorization()
+}
+
+func (r *refreshing) now() time.Time {
+	if r.clock == nil {
+		return time.Now()
+	}
+	return r.clock()
+}
+
+func (r *refreshing) expired() bool {
+	return r.now().Sub(r.last) > r.duration
+}
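A usage sketch of the RefreshingKeychain wrapper added above (the ghcr.io reference is hypothetical). Note that authn.DefaultKeychain itself now ships pre-wrapped with a 5-minute TTL, so most callers get this behavior for free.

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Any Keychain can be wrapped so its credentials are re-resolved once
	// the TTL elapses, instead of being cached for the process lifetime.
	kc := authn.RefreshingKeychain(authn.DefaultKeychain, 30*time.Minute)

	ref, err := name.ParseReference("ghcr.io/example/app:latest") // hypothetical reference
	if err != nil {
		panic(err)
	}
	auth, err := kc.Resolve(ref.Context())
	if err != nil {
		panic(err)
	}
	fmt.Printf("resolved authenticator: %T\n", auth)
}
```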
@@ -17,6 +17,7 @@ package name
 import (
 	"net"
 	"net/url"
+	"path"
 	"regexp"
 	"strings"
 )
@@ -50,6 +51,11 @@ func (r Registry) String() string {
 	return r.Name()
 }
 
+// Repo returns a Repository in the Registry with the given name.
+func (r Registry) Repo(repo ...string) Repository {
+	return Repository{Registry: r, repository: path.Join(repo...)}
+}
+
 // Scope returns the scope required to access the registry.
 func (r Registry) Scope(string) string {
 	// The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
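A small sketch of the newly exported Registry.Repo helper; the registry host and path segments are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com") // hypothetical registry
	if err != nil {
		panic(err)
	}
	// Repo path-joins the segments into a Repository rooted at the registry.
	repo := reg.Repo("team", "app")
	fmt.Println(repo.Name()) // registry.example.com/team/app
}
```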
@@ -19,6 +19,10 @@ import (
 	"context"
 	"io"
 	"sync"
+	"time"
+
+	api "github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 
 	"github.com/google/go-containerregistry/pkg/name"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
@@ -30,7 +34,9 @@ type image struct {
 	ref          name.Reference
 	opener       *imageOpener
 	tarballImage v1.Image
+	computed     bool
 	id           *v1.Hash
+	configFile   *v1.ConfigFile
 
 	once sync.Once
 	err  error
@@ -121,6 +127,28 @@ func (i *image) initialize() error {
 	return i.err
 }
 
+func (i *image) compute() error {
+	// Don't re-compute if already computed.
+	if i.computed {
+		return nil
+	}
+
+	inspect, _, err := i.opener.client.ImageInspectWithRaw(i.opener.ctx, i.ref.String())
+	if err != nil {
+		return err
+	}
+
+	configFile, err := i.computeConfigFile(inspect)
+	if err != nil {
+		return err
+	}
+
+	i.configFile = configFile
+	i.computed = true
+
+	return nil
+}
+
 func (i *image) Layers() ([]v1.Layer, error) {
 	if err := i.initialize(); err != nil {
 		return nil, err
@@ -154,16 +182,19 @@ func (i *image) ConfigName() (v1.Hash, error) {
 }
 
 func (i *image) ConfigFile() (*v1.ConfigFile, error) {
-	if err := i.initialize(); err != nil {
+	if err := i.compute(); err != nil {
 		return nil, err
 	}
-	return i.tarballImage.ConfigFile()
+	return i.configFile.DeepCopy(), nil
 }
 
 func (i *image) RawConfigFile() ([]byte, error) {
 	if err := i.initialize(); err != nil {
 		return nil, err
 	}
+
+	// RawConfigFile cannot be generated from "docker inspect" because Docker Engine API returns serialized data,
+	// and formatting information of the raw config such as indent and prefix will be lost.
 	return i.tarballImage.RawConfigFile()
 }
 
@@ -201,3 +232,119 @@ func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) {
 	}
 	return i.tarballImage.LayerByDiffID(h)
 }
+
+func (i *image) configHistory(author string) ([]v1.History, error) {
+	historyItems, err := i.opener.client.ImageHistory(i.opener.ctx, i.ref.String())
+	if err != nil {
+		return nil, err
+	}
+
+	history := make([]v1.History, len(historyItems))
+	for j, h := range historyItems {
+		history[j] = v1.History{
+			Author: author,
+			Created: v1.Time{
+				Time: time.Unix(h.Created, 0).UTC(),
+			},
+			CreatedBy:  h.CreatedBy,
+			Comment:    h.Comment,
+			EmptyLayer: h.Size == 0,
+		}
+	}
+	return history, nil
+}
+
+func (i *image) diffIDs(rootFS api.RootFS) ([]v1.Hash, error) {
+	diffIDs := make([]v1.Hash, len(rootFS.Layers))
+	for j, l := range rootFS.Layers {
+		h, err := v1.NewHash(l)
+		if err != nil {
+			return nil, err
+		}
+		diffIDs[j] = h
+	}
+	return diffIDs, nil
+}
+
+func (i *image) computeConfigFile(inspect api.ImageInspect) (*v1.ConfigFile, error) {
+	diffIDs, err := i.diffIDs(inspect.RootFS)
+	if err != nil {
+		return nil, err
+	}
+
+	history, err := i.configHistory(inspect.Author)
+	if err != nil {
+		return nil, err
+	}
+
+	created, err := time.Parse(time.RFC3339Nano, inspect.Created)
+	if err != nil {
+		return nil, err
+	}
+
+	return &v1.ConfigFile{
+		Architecture:  inspect.Architecture,
+		Author:        inspect.Author,
+		Container:     inspect.Container,
+		Created:       v1.Time{Time: created},
+		DockerVersion: inspect.DockerVersion,
+		History:       history,
+		OS:            inspect.Os,
+		RootFS: v1.RootFS{
+			Type:    inspect.RootFS.Type,
+			DiffIDs: diffIDs,
+		},
+		Config:    i.computeImageConfig(inspect.Config),
+		OSVersion: inspect.OsVersion,
+	}, nil
+}
+
+func (i *image) computeImageConfig(config *container.Config) v1.Config {
+	if config == nil {
+		return v1.Config{}
+	}
+
+	c := v1.Config{
+		AttachStderr:    config.AttachStderr,
+		AttachStdin:     config.AttachStdin,
+		AttachStdout:    config.AttachStdout,
+		Cmd:             config.Cmd,
+		Domainname:      config.Domainname,
+		Entrypoint:      config.Entrypoint,
+		Env:             config.Env,
+		Hostname:        config.Hostname,
+		Image:           config.Image,
+		Labels:          config.Labels,
+		OnBuild:         config.OnBuild,
+		OpenStdin:       config.OpenStdin,
+		StdinOnce:       config.StdinOnce,
+		Tty:             config.Tty,
+		User:            config.User,
+		Volumes:         config.Volumes,
+		WorkingDir:      config.WorkingDir,
+		ArgsEscaped:     config.ArgsEscaped,
+		NetworkDisabled: config.NetworkDisabled,
+		MacAddress:      config.MacAddress,
+		StopSignal:      config.StopSignal,
+		Shell:           config.Shell,
+	}
+
+	if config.Healthcheck != nil {
+		c.Healthcheck = &v1.HealthConfig{
+			Test:        config.Healthcheck.Test,
+			Interval:    config.Healthcheck.Interval,
+			Timeout:     config.Healthcheck.Timeout,
+			StartPeriod: config.Healthcheck.StartPeriod,
+			Retries:     config.Healthcheck.Retries,
+		}
+	}
+
+	if len(config.ExposedPorts) > 0 {
+		c.ExposedPorts = map[string]struct{}{}
+		for port := range config.ExposedPorts {
+			c.ExposedPorts[string(port)] = struct{}{}
+		}
+	}
+
+	return c
+}
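A sketch of how the daemon-backed ConfigFile path above is exercised from the public API; it assumes a running Docker daemon with alpine:latest available locally.

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/daemon"
)

func main() {
	ref, err := name.ParseReference("alpine:latest")
	if err != nil {
		panic(err)
	}
	img, err := daemon.Image(ref)
	if err != nil {
		panic(err)
	}
	// ConfigFile is now synthesized from the inspect/history endpoints
	// instead of forcing a full image save, so this call returns faster.
	cf, err := img.ConfigFile()
	if err != nil {
		panic(err)
	}
	fmt.Println(cf.OS, cf.Architecture, len(cf.History))
}
```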
@@ -19,6 +19,7 @@ import (
 	"io"
 
 	"github.com/docker/docker/api/types"
+	api "github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/client"
 )
 
@@ -100,4 +101,5 @@ type Client interface {
 	ImageLoad(context.Context, io.Reader, bool) (types.ImageLoadResponse, error)
 	ImageTag(context.Context, string, string) error
 	ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error)
+	ImageHistory(context.Context, string) ([]api.HistoryResponseItem, error)
 }
@@ -60,5 +60,6 @@ func base() *v1.IndexManifest {
 	return &v1.IndexManifest{
 		SchemaVersion: 2,
+		MediaType:     types.OCIImageIndex,
 		Manifests:     []v1.Descriptor{},
 	}
 }
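A quick check of the newly explicit media type on empty.Index, as a sketch:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/empty"
)

func main() {
	m, err := empty.Index.IndexManifest()
	if err != nil {
		panic(err)
	}
	// The media type is now set explicitly rather than left empty.
	fmt.Println(m.MediaType) // application/vnd.oci.image.index.v1+json
}
```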
@@ -53,11 +53,7 @@ type googleKeychain struct {
// gcloud configuration in the scope of this one process.
func (gk *googleKeychain) Resolve(target authn.Resource) (authn.Authenticator, error) {
	// Only authenticate GCR and AR so it works with authn.NewMultiKeychain to fallback.
-	host := target.RegistryStr()
-	if host != "gcr.io" &&
-		!strings.HasSuffix(host, ".gcr.io") &&
-		!strings.HasSuffix(host, ".pkg.dev") &&
-		!strings.HasSuffix(host, ".google.com") {
+	if !isGoogle(target.RegistryStr()) {
		return authn.Anonymous, nil
	}

@@ -90,3 +86,10 @@ func resolve() authn.Authenticator {
	}
	return authn.Anonymous
}
+
+func isGoogle(host string) bool {
+	return host == "gcr.io" ||
+		strings.HasSuffix(host, ".gcr.io") ||
+		strings.HasSuffix(host, ".pkg.dev") ||
+		strings.HasSuffix(host, ".google.com")
+}
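The point of returning authn.Anonymous for non-Google hosts is that this keychain can sit in front of others via authn.NewMultiKeychain. A minimal sketch (the repository name is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/google"
)

func main() {
	// google.Keychain handles gcr.io / *.pkg.dev hosts; everything else
	// falls through to the default (docker config) keychain.
	kc := authn.NewMultiKeychain(google.Keychain, authn.DefaultKeychain)

	repo, err := name.NewRepository("us-docker.pkg.dev/my-project/my-repo/app")
	if err != nil {
		log.Fatal(err)
	}
	auth, err := kc.Resolve(repo)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("resolved authenticator: %T\n", auth)
}
```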
@@ -89,12 +89,16 @@ func newLister(repo name.Repository, options ...Option) (*lister, error) {

func (l *lister) list(repo name.Repository) (*Tags, error) {
	uri := &url.URL{
-		Scheme: repo.Registry.Scheme(),
-		Host:   repo.Registry.RegistryStr(),
-		Path:   fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
-		// ECR returns an error if n > 1000:
-		// https://github.com/google/go-containerregistry/issues/681
-		RawQuery: "n=1000",
+		Scheme:   repo.Registry.Scheme(),
+		Host:     repo.Registry.RegistryStr(),
+		Path:     fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
+		RawQuery: "n=10000",
	}

+	// ECR returns an error if n > 1000:
+	// https://github.com/google/go-containerregistry/issues/681
+	if !isGoogle(repo.RegistryStr()) {
+		uri.RawQuery = "n=1000"
+	}
+
	tags := Tags{}
@@ -18,6 +18,7 @@ import (
	"bytes"
	"encoding/json"
+	"errors"
	"sync"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"
@@ -38,6 +39,8 @@ type image struct {
	diffIDMap map[v1.Hash]v1.Layer
	digestMap map[v1.Hash]v1.Layer
	subject   *v1.Descriptor
+
+	sync.Mutex
}

var _ v1.Image = (*image)(nil)
@@ -50,6 +53,9 @@ func (i *image) MediaType() (types.MediaType, error) {
}

func (i *image) compute() error {
+	i.Lock()
+	defer i.Unlock()
+
	// Don't re-compute if already computed.
	if i.computed {
		return nil
@@ -16,12 +16,15 @@ package mutate

import (
	"encoding/json"
	"errors"
	"fmt"
+	"sync"

	"github.com/google/go-containerregistry/pkg/logs"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
	"github.com/google/go-containerregistry/pkg/v1/types"
)
@@ -71,6 +74,8 @@ type index struct {
	indexMap map[v1.Hash]v1.ImageIndex
	layerMap map[v1.Hash]v1.Layer
	subject  *v1.Descriptor
+
+	sync.Mutex
}

var _ v1.ImageIndex = (*index)(nil)
@@ -85,6 +90,9 @@ func (i *index) MediaType() (types.MediaType, error) {
func (i *index) Size() (int64, error) { return partial.Size(i) }

func (i *index) compute() error {
+	i.Lock()
+	defer i.Unlock()
+
	// Don't re-compute if already computed.
	if i.computed {
		return nil
@@ -202,3 +210,23 @@ func (i *index) RawManifest() ([]byte, error) {
	}
	return json.Marshal(i.manifest)
}
+
+func (i *index) Manifests() ([]partial.Describable, error) {
+	if err := i.compute(); errors.Is(err, stream.ErrNotComputed) {
+		// Index contains a streamable layer which has not yet been
+		// consumed. Just return the manifests we have in case the caller
+		// is going to consume the streamable layers.
+		manifests, err := partial.Manifests(i.base)
+		if err != nil {
+			return nil, err
+		}
+		for _, add := range i.adds {
+			manifests = append(manifests, add.Add)
+		}
+		return manifests, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	return partial.ComputeManifests(i)
+}
@@ -19,6 +19,7 @@ import (

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/match"
+	"github.com/google/go-containerregistry/pkg/v1/types"
)

// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher.
@@ -83,3 +84,82 @@ func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
	}
	return matches, nil
}
+
+type withManifests interface {
+	Manifests() ([]Describable, error)
+}
+
+type withLayer interface {
+	Layer(v1.Hash) (v1.Layer, error)
+}
+
+type describable struct {
+	desc v1.Descriptor
+}
+
+func (d describable) Digest() (v1.Hash, error) {
+	return d.desc.Digest, nil
+}
+
+func (d describable) Size() (int64, error) {
+	return d.desc.Size, nil
+}
+
+func (d describable) MediaType() (types.MediaType, error) {
+	return d.desc.MediaType, nil
+}
+
+func (d describable) Descriptor() (*v1.Descriptor, error) {
+	return &d.desc, nil
+}
+
+// Manifests is analogous to v1.Image.Layers in that it allows values in the
+// returned list to be lazily evaluated, which enables an index to contain
+// an image that contains a streaming layer.
+//
+// This should have been part of the v1.ImageIndex interface, but wasn't.
+// It is instead usable through this extension interface.
+func Manifests(idx v1.ImageIndex) ([]Describable, error) {
+	if wm, ok := idx.(withManifests); ok {
+		return wm.Manifests()
+	}
+
+	return ComputeManifests(idx)
+}
+
+// ComputeManifests provides a fallback implementation for Manifests.
+func ComputeManifests(idx v1.ImageIndex) ([]Describable, error) {
+	m, err := idx.IndexManifest()
+	if err != nil {
+		return nil, err
+	}
+	manifests := []Describable{}
+	for _, desc := range m.Manifests {
+		switch {
+		case desc.MediaType.IsImage():
+			img, err := idx.Image(desc.Digest)
+			if err != nil {
+				return nil, err
+			}
+			manifests = append(manifests, img)
+		case desc.MediaType.IsIndex():
+			idx, err := idx.ImageIndex(desc.Digest)
+			if err != nil {
+				return nil, err
+			}
+			manifests = append(manifests, idx)
+		default:
+			if wl, ok := idx.(withLayer); ok {
+				layer, err := wl.Layer(desc.Digest)
+				if err != nil {
+					return nil, err
+				}
+				manifests = append(manifests, layer)
+			} else {
+				manifests = append(manifests, describable{desc})
+			}
+		}
+	}
+
+	return manifests, nil
+}
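A short sketch of walking an index through this new extension point (the random package is used here purely to have an index to walk):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	idx, err := random.Index(1024, 1, 3) // three single-layer images
	if err != nil {
		log.Fatal(err)
	}

	// Manifests prefers the index's own lazy implementation (withManifests)
	// and falls back to ComputeManifests otherwise.
	manifests, err := partial.Manifests(idx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range manifests {
		d, _ := m.Digest()
		mt, _ := m.MediaType()
		fmt.Println(d, mt)
	}
}
```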
@@ -18,12 +18,10 @@ import (
	"archive/tar"
	"bytes"
	"crypto"
-	"crypto/rand"
	"encoding/hex"
	"fmt"
	"io"
-	mrand "math/rand"
-	"time"
+	"math/rand"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
@@ -57,10 +55,10 @@ func (ul *uncompressedLayer) MediaType() (types.MediaType, error) {
var _ partial.UncompressedLayer = (*uncompressedLayer)(nil)

// Image returns a pseudo-randomly generated Image.
-func Image(byteSize, layers int64) (v1.Image, error) {
+func Image(byteSize, layers int64, options ...Option) (v1.Image, error) {
	adds := make([]mutate.Addendum, 0, 5)
	for i := int64(0); i < layers; i++ {
-		layer, err := Layer(byteSize, types.DockerLayer)
+		layer, err := Layer(byteSize, types.DockerLayer, options...)
		if err != nil {
			return nil, err
		}
@@ -70,7 +68,6 @@ func Image(byteSize, layers int64) (v1.Image, error) {
			Author:    "random.Image",
			Comment:   fmt.Sprintf("this is a random history %d of %d", i, layers),
			CreatedBy: "random",
-			Created:   v1.Time{Time: time.Now()},
		},
	})
}
@@ -79,8 +76,11 @@ func Image(byteSize, layers int64) (v1.Image, error) {
}

// Layer returns a layer with pseudo-randomly generated content.
-func Layer(byteSize int64, mt types.MediaType) (v1.Layer, error) {
-	fileName := fmt.Sprintf("random_file_%d.txt", mrand.Int()) //nolint: gosec
+func Layer(byteSize int64, mt types.MediaType, options ...Option) (v1.Layer, error) {
+	o := getOptions(options)
+	rng := rand.New(o.source) //nolint:gosec
+
+	fileName := fmt.Sprintf("random_file_%d.txt", rng.Int())

	// Hash the contents as we write it out to the buffer.
	var b bytes.Buffer
@@ -96,7 +96,7 @@ func Layer(byteSize int64, mt types.MediaType) (v1.Layer, error) {
	}); err != nil {
		return nil, err
	}
-	if _, err := io.CopyN(tw, rand.Reader, byteSize); err != nil {
+	if _, err := io.CopyN(tw, rng, byteSize); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
|||
|
|
@ -31,7 +31,7 @@ type randomIndex struct {
|
|||
|
||||
// Index returns a pseudo-randomly generated ImageIndex with count images, each
|
||||
// having the given number of layers of size byteSize.
|
||||
func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
|
||||
func Index(byteSize, layers, count int64, options ...Option) (v1.ImageIndex, error) {
|
||||
manifest := v1.IndexManifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: types.OCIImageIndex,
|
||||
|
|
@ -40,7 +40,7 @@ func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
|
|||
|
||||
images := make(map[v1.Hash]v1.Image)
|
||||
for i := int64(0); i < count; i++ {
|
||||
img, err := Image(byteSize, layers)
|
||||
img, err := Image(byteSize, layers, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
|||
vendor/github.com/google/go-containerregistry/pkg/v1/random/options.go (generated, vendored, new file, 60 lines)

@@ -0,0 +1,60 @@
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package random

import "math/rand"

// Option is an optional parameter to the random functions
type Option func(opts *options)

type options struct {
	source rand.Source

	// TODO opens the door to add this in the future
	// algorithm digest.Algorithm
}

func getOptions(opts []Option) *options {
	// get a random seed

	// TODO in go 1.20 this is fine (it will be random)
	seed := rand.Int63() //nolint:gosec
	/*
		// in prior go versions this needs to come from crypto/rand
		var b [8]byte
		_, err := crypto_rand.Read(b[:])
		if err != nil {
			panic("cryptographically secure random number generator is not working")
		}
		seed := int64(binary.LittleEndian.Int64(b[:]))
	*/

	// defaults
	o := &options{
		source: rand.NewSource(seed),
	}

	for _, opt := range opts {
		opt(o)
	}
	return o
}

// WithSource sets the random number generator source
func WithSource(source rand.Source) Option {
	return func(opts *options) {
		opts.source = source
	}
}
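With WithSource, the random image content becomes reproducible; a minimal sketch:

```go
package main

import (
	"fmt"
	"log"
	"math/rand"

	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	// Two images built from the same seed should have identical digests,
	// now that the history timestamps above no longer call time.Now().
	a, err := random.Image(1024, 2, random.WithSource(rand.NewSource(42)))
	if err != nil {
		log.Fatal(err)
	}
	b, err := random.Image(1024, 2, random.WithSource(rand.NewSource(42)))
	if err != nil {
		log.Fatal(err)
	}
	da, _ := a.Digest()
	db, _ := b.Digest()
	fmt.Println(da == db) // true
}
```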
@@ -25,38 +25,35 @@ import (
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

-type catalog struct {
+type Catalogs struct {
	Repos []string `json:"repositories"`
+	Next  string   `json:"next,omitempty"`
}

// CatalogPage calls /_catalog, returning the list of repositories on the registry.
func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) {
-	o, err := makeOptions(target, options...)
+	o, err := makeOptions(options...)
	if err != nil {
		return nil, err
	}

-	scopes := []string{target.Scope(transport.PullScope)}
-	tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
+	f, err := newPuller(o).fetcher(o.context, target)
	if err != nil {
		return nil, err
	}

-	query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n)
-
	uri := url.URL{
		Scheme:   target.Scheme(),
		Host:     target.RegistryStr(),
		Path:     "/v2/_catalog",
-		RawQuery: query,
+		RawQuery: fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n),
	}

-	client := http.Client{Transport: tr}
	req, err := http.NewRequest(http.MethodGet, uri.String(), nil)
	if err != nil {
		return nil, err
	}
-	resp, err := client.Do(req.WithContext(o.context))
+	resp, err := f.client.Do(req.WithContext(o.context))
	if err != nil {
		return nil, err
	}
|
|||
return nil, err
|
||||
}
|
||||
|
||||
var parsed catalog
|
||||
var parsed Catalogs
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -76,79 +73,87 @@ func CatalogPage(target name.Registry, last string, n int, options ...Option) ([
|
|||
|
||||
// Catalog calls /_catalog, returning the list of repositories on the registry.
|
||||
func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) {
|
||||
o, err := makeOptions(target, options...)
|
||||
o, err := makeOptions(options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
scopes := []string{target.Scope(transport.PullScope)}
|
||||
tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri := &url.URL{
|
||||
Scheme: target.Scheme(),
|
||||
Host: target.RegistryStr(),
|
||||
Path: "/v2/_catalog",
|
||||
}
|
||||
|
||||
if o.pageSize > 0 {
|
||||
uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize)
|
||||
}
|
||||
|
||||
client := http.Client{Transport: tr}
|
||||
|
||||
// WithContext overrides the ctx passed directly.
|
||||
if o.context != context.Background() {
|
||||
ctx = o.context
|
||||
}
|
||||
|
||||
var (
|
||||
parsed catalog
|
||||
repoList []string
|
||||
)
|
||||
|
||||
// get responses until there is no next page
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", uri.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repoList = append(repoList, parsed.Repos...)
|
||||
|
||||
uri, err = getNextPageURL(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// no next page
|
||||
if uri == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return repoList, nil
|
||||
return newPuller(o).catalog(ctx, target, o.pageSize)
|
||||
}
|
||||
|
||||
func (f *fetcher) catalogPage(ctx context.Context, reg name.Registry, next string, pageSize int) (*Catalogs, error) {
|
||||
if next == "" {
|
||||
uri := &url.URL{
|
||||
Scheme: reg.Scheme(),
|
||||
Host: reg.RegistryStr(),
|
||||
Path: "/v2/_catalog",
|
||||
}
|
||||
if pageSize > 0 {
|
||||
uri.RawQuery = fmt.Sprintf("n=%d", pageSize)
|
||||
}
|
||||
next = uri.String()
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", next, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := f.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
parsed := Catalogs{}
|
||||
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uri, err := getNextPageURL(resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uri != nil {
|
||||
parsed.Next = uri.String()
|
||||
}
|
||||
|
||||
return &parsed, nil
|
||||
}
|
||||
|
||||
type Catalogger struct {
|
||||
f *fetcher
|
||||
reg name.Registry
|
||||
pageSize int
|
||||
|
||||
page *Catalogs
|
||||
err error
|
||||
|
||||
needMore bool
|
||||
}
|
||||
|
||||
func (l *Catalogger) Next(ctx context.Context) (*Catalogs, error) {
|
||||
if l.needMore {
|
||||
l.page, l.err = l.f.catalogPage(ctx, l.reg, l.page.Next, l.pageSize)
|
||||
} else {
|
||||
l.needMore = true
|
||||
}
|
||||
return l.page, l.err
|
||||
}
|
||||
|
||||
func (l *Catalogger) HasNext() bool {
|
||||
return l.page != nil && (!l.needMore || l.page.Next != "")
|
||||
}
|
||||
|
|
|
|||
|
|
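Callers that just want every repository can keep using the public Catalog function, which now delegates to the paginated plumbing above (following the registry's next-page links internally). A sketch of the simple path (the registry host is illustrative):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	reg, err := name.NewRegistry("registry.example.com")
	if err != nil {
		log.Fatal(err)
	}

	repos, err := remote.Catalog(context.Background(), reg)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range repos {
		fmt.Println(r)
	}
}
```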
@@ -15,47 +15,14 @@
package remote

import (
-	"fmt"
-	"net/http"
-	"net/url"
-
	"github.com/google/go-containerregistry/pkg/name"
-	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

// Delete removes the specified image reference from the remote registry.
func Delete(ref name.Reference, options ...Option) error {
-	o, err := makeOptions(ref.Context(), options...)
+	o, err := makeOptions(options...)
	if err != nil {
		return err
	}
-	scopes := []string{ref.Scope(transport.DeleteScope)}
-	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
-	if err != nil {
-		return err
-	}
-	c := &http.Client{Transport: tr}
-
-	u := url.URL{
-		Scheme: ref.Context().Registry.Scheme(),
-		Host:   ref.Context().RegistryStr(),
-		Path:   fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()),
-	}
-
-	req, err := http.NewRequest(http.MethodDelete, u.String(), nil)
-	if err != nil {
-		return err
-	}
-
-	resp, err := c.Do(req.WithContext(o.context))
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-
-	return transport.CheckError(resp, http.StatusOK, http.StatusAccepted)
+
+	// TODO(jason): If the manifest had a `subject`, and if the registry
+	// doesn't support Referrers, update the index pointed to by the
+	// subject's fallback tag to remove the descriptor for this manifest.
+	return newPusher(o).Delete(o.context, ref)
}
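Delete keeps its public signature; only the plumbing moved into the pusher. Typical usage (the reference is illustrative, and deleting usually requires credentials with delete scope):

```go
package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/app:old")
	if err != nil {
		log.Fatal(err)
	}
	if err := remote.Delete(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {
		log.Fatal(err)
	}
}
```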
@@ -15,26 +15,21 @@
package remote

import (
-	"bytes"
	"context"
-	"encoding/json"
	"errors"
	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"strings"

-	"github.com/google/go-containerregistry/internal/redact"
-	"github.com/google/go-containerregistry/internal/verify"
	"github.com/google/go-containerregistry/pkg/logs"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
-	"github.com/google/go-containerregistry/pkg/v1/partial"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

+var allManifestMediaTypes = append(append([]types.MediaType{
+	types.DockerManifestSchema1,
+	types.DockerManifestSchema1Signed,
+}, acceptableImageMediaTypes...), acceptableIndexMediaTypes...)
+
// ErrSchema1 indicates that we received a schema1 manifest from the registry.
// This library doesn't have plans to support this legacy image format:
// https://github.com/google/go-containerregistry/issues/377
@@ -57,14 +52,21 @@ func (e *ErrSchema1) Error() string {
// Descriptor provides access to metadata about remote artifact and accessors
// for efficiently converting it into a v1.Image or v1.ImageIndex.
type Descriptor struct {
-	fetcher
+	fetcher fetcher
	v1.Descriptor

+	ref      name.Reference
	Manifest []byte
+	ctx      context.Context

	// So we can share this implementation with Image.
	platform v1.Platform
}

+func (d *Descriptor) toDesc() v1.Descriptor {
+	return d.Descriptor
+}
+
// RawManifest exists to satisfy the Taggable interface.
func (d *Descriptor) RawManifest() ([]byte, error) {
	return d.Manifest, nil
@@ -76,14 +78,7 @@ func (d *Descriptor) RawManifest() ([]byte, error) {
//
// See Head if you don't need the response body.
func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
-	acceptable := []types.MediaType{
-		// Just to look at them.
-		types.DockerManifestSchema1,
-		types.DockerManifestSchema1Signed,
-	}
-	acceptable = append(acceptable, acceptableImageMediaTypes...)
-	acceptable = append(acceptable, acceptableIndexMediaTypes...)
-	return get(ref, acceptable, options...)
+	return get(ref, allManifestMediaTypes, options...)
}

// Head returns a v1.Descriptor for the given reference by issuing a HEAD
@@ -92,48 +87,22 @@ func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
// Note that the server response will not have a body, so any errors encountered
// should be retried with Get to get more details.
func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
-	acceptable := []types.MediaType{
-		// Just to look at them.
-		types.DockerManifestSchema1,
-		types.DockerManifestSchema1Signed,
-	}
-	acceptable = append(acceptable, acceptableImageMediaTypes...)
-	acceptable = append(acceptable, acceptableIndexMediaTypes...)
-
-	o, err := makeOptions(ref.Context(), options...)
+	o, err := makeOptions(options...)
	if err != nil {
		return nil, err
	}

-	f, err := makeFetcher(ref, o)
-	if err != nil {
-		return nil, err
-	}
-
-	return f.headManifest(ref, acceptable)
+	return newPuller(o).Head(o.context, ref)
}

// Handle options and fetch the manifest with the acceptable MediaTypes in the
// Accept header.
func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
-	o, err := makeOptions(ref.Context(), options...)
+	o, err := makeOptions(options...)
	if err != nil {
		return nil, err
	}
-	f, err := makeFetcher(ref, o)
-	if err != nil {
-		return nil, err
-	}
-	b, desc, err := f.fetchManifest(ref, acceptable)
-	if err != nil {
-		return nil, err
-	}
-	return &Descriptor{
-		fetcher:    *f,
-		Manifest:   b,
-		Descriptor: *desc,
-		platform:   o.platform,
-	}, nil
+	return newPuller(o).get(o.context, ref, acceptable, o.platform)
}

// Image converts the Descriptor into a v1.Image.
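Head remains the cheap probe: one HEAD request, with the descriptor built from response headers and no body to verify. A sketch contrasting the two calls (the image reference is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/distroless/static:nonroot")
	if err != nil {
		log.Fatal(err)
	}

	// HEAD: digest, size, and media type from headers only.
	desc, err := remote.Head(ref)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("head:", desc.Digest, desc.Size, desc.MediaType)

	// GET: full manifest body, digest verified against the bytes.
	d, err := remote.Get(ref)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("get: ", len(d.Manifest), "manifest bytes")
}
```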
@@ -169,7 +138,28 @@ func (d *Descriptor) Image() (v1.Image, error) {
	}
	return &mountableImage{
		Image:     imgCore,
-		Reference: d.Ref,
+		Reference: d.ref,
	}, nil
}

+// Schema1 converts the Descriptor into a v1.Image for v2 schema 1 media types.
+//
+// The v1.Image returned by this method does not implement the entire interface because it would be inefficient.
+// This exists mostly to make it easier to copy schema 1 images around or look at their filesystems.
+// This is separate from Image() to avoid a backward incompatible change for callers expecting ErrSchema1.
+func (d *Descriptor) Schema1() (v1.Image, error) {
+	i := &schema1{
+		ref:        d.ref,
+		fetcher:    d.fetcher,
+		ctx:        d.ctx,
+		manifest:   d.Manifest,
+		mediaType:  d.MediaType,
+		descriptor: &d.Descriptor,
+	}
+
+	return &mountableImage{
+		Image:     i,
+		Reference: d.ref,
+	}, nil
+}
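Schema1 gives callers an escape hatch without changing Image()'s ErrSchema1 contract; a sketch of the intended fallback (the reference is illustrative):

```go
package main

import (
	"errors"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/legacy:tag")
	if err != nil {
		log.Fatal(err)
	}
	desc, err := remote.Get(ref)
	if err != nil {
		log.Fatal(err)
	}

	var img v1.Image
	img, err = desc.Image()
	var es1 *remote.ErrSchema1
	if errors.As(err, &es1) {
		// Old schema 1 manifest: fall back to the partial implementation,
		// which is enough to copy the image or inspect its filesystem.
		img, err = desc.Schema1()
	}
	if err != nil {
		log.Fatal(err)
	}
	_ = img
}
```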
@@ -195,6 +185,8 @@ func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) {

func (d *Descriptor) remoteImage() *remoteImage {
	return &remoteImage{
+		ref:       d.ref,
+		ctx:       d.ctx,
		fetcher:   d.fetcher,
		manifest:  d.Manifest,
		mediaType: d.MediaType,
@@ -204,308 +196,11 @@ func (d *Descriptor) remoteImage() *remoteImage {

func (d *Descriptor) remoteIndex() *remoteIndex {
	return &remoteIndex{
+		ref:        d.ref,
+		ctx:        d.ctx,
		fetcher:    d.fetcher,
		manifest:   d.Manifest,
		mediaType:  d.MediaType,
		descriptor: &d.Descriptor,
	}
}

-// fetcher implements methods for reading from a registry.
-type fetcher struct {
-	Ref     name.Reference
-	Client  *http.Client
-	context context.Context
-}
-
-func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
-	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
-	if err != nil {
-		return nil, err
-	}
-	return &fetcher{
-		Ref:     ref,
-		Client:  &http.Client{Transport: tr},
-		context: o.context,
-	}, nil
-}
-
-// url returns a url.Url for the specified path in the context of this remote image reference.
-func (f *fetcher) url(resource, identifier string) url.URL {
-	return url.URL{
-		Scheme: f.Ref.Context().Registry.Scheme(),
-		Host:   f.Ref.Context().RegistryStr(),
-		Path:   fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
-	}
-}
-
-// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema
-func fallbackTag(d name.Digest) name.Tag {
-	return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1))
-}
-
-func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (*v1.IndexManifest, error) {
-	// Check the Referrers API endpoint first.
-	u := f.url("referrers", d.DigestStr())
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Accept", string(types.OCIImageIndex))
-
-	resp, err := f.Client.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil {
-		return nil, err
-	}
-	if resp.StatusCode == http.StatusOK {
-		var im v1.IndexManifest
-		if err := json.NewDecoder(resp.Body).Decode(&im); err != nil {
-			return nil, err
-		}
-		return filterReferrersResponse(filter, &im), nil
-	}
-
-	// The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme.
-	b, _, err := f.fetchManifest(fallbackTag(d), []types.MediaType{types.OCIImageIndex})
-	if err != nil {
-		return nil, err
-	}
-	var terr *transport.Error
-	if ok := errors.As(err, &terr); ok && terr.StatusCode == http.StatusNotFound {
-		// Not found just means there are no attachments yet. Start with an empty manifest.
-		return &v1.IndexManifest{MediaType: types.OCIImageIndex}, nil
-	}
-
-	var im v1.IndexManifest
-	if err := json.Unmarshal(b, &im); err != nil {
-		return nil, err
-	}
-
-	return filterReferrersResponse(filter, &im), nil
-}
-
-func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
-	u := f.url("manifests", ref.Identifier())
-	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-	if err != nil {
-		return nil, nil, err
-	}
-	accept := []string{}
-	for _, mt := range acceptable {
-		accept = append(accept, string(mt))
-	}
-	req.Header.Set("Accept", strings.Join(accept, ","))
-
-	resp, err := f.Client.Do(req.WithContext(f.context))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	if err := transport.CheckError(resp, http.StatusOK); err != nil {
-		return nil, nil, err
-	}
-
-	manifest, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	digest, size, err := v1.SHA256(bytes.NewReader(manifest))
-	if err != nil {
-		return nil, nil, err
-	}
-
-	mediaType := types.MediaType(resp.Header.Get("Content-Type"))
-	contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
-	if err == nil && mediaType == types.DockerManifestSchema1Signed {
-		// If we can parse the digest from the header, and it's a signed schema 1
-		// manifest, let's use that for the digest to appease older registries.
-		digest = contentDigest
-	}
-
-	// Validate the digest matches what we asked for, if pulling by digest.
-	if dgst, ok := ref.(name.Digest); ok {
-		if digest.String() != dgst.DigestStr() {
-			return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
-		}
-	}
-
-	var artifactType string
-	mf, _ := v1.ParseManifest(bytes.NewReader(manifest))
-	// Failing to parse as a manifest should just be ignored.
-	// The manifest might not be valid, and that's okay.
-	if mf != nil && !mf.Config.MediaType.IsConfig() {
-		artifactType = string(mf.Config.MediaType)
-	}
-
-	// Do nothing for tags; I give up.
-	//
-	// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
-	// but so many registries implement this incorrectly that it's not worth checking.
-	//
-	// For reference:
-	// https://github.com/GoogleContainerTools/kaniko/issues/298
-
-	// Return all this info since we have to calculate it anyway.
-	desc := v1.Descriptor{
-		Digest:       digest,
-		Size:         size,
-		MediaType:    mediaType,
-		ArtifactType: artifactType,
-	}
-
-	return manifest, &desc, nil
-}
-
-func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
-	u := f.url("manifests", ref.Identifier())
-	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	accept := []string{}
-	for _, mt := range acceptable {
-		accept = append(accept, string(mt))
-	}
-	req.Header.Set("Accept", strings.Join(accept, ","))
-
-	resp, err := f.Client.Do(req.WithContext(f.context))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if err := transport.CheckError(resp, http.StatusOK); err != nil {
-		return nil, err
-	}
-
-	mth := resp.Header.Get("Content-Type")
-	if mth == "" {
-		return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String())
-	}
-	mediaType := types.MediaType(mth)
-
-	size := resp.ContentLength
-	if size == -1 {
-		return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String())
-	}
-
-	dh := resp.Header.Get("Docker-Content-Digest")
-	if dh == "" {
-		return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String())
-	}
-	digest, err := v1.NewHash(dh)
-	if err != nil {
-		return nil, err
-	}
-
-	// Validate the digest matches what we asked for, if pulling by digest.
-	if dgst, ok := ref.(name.Digest); ok {
-		if digest.String() != dgst.DigestStr() {
-			return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
-		}
-	}
-
-	// Return all this info since we have to calculate it anyway.
-	return &v1.Descriptor{
-		Digest:    digest,
-		Size:      size,
-		MediaType: mediaType,
-	}, nil
-}
-
-func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) {
-	u := f.url("blobs", h.String())
-	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := f.Client.Do(req.WithContext(ctx))
-	if err != nil {
-		return nil, redact.Error(err)
-	}
-
-	if err := transport.CheckError(resp, http.StatusOK); err != nil {
-		resp.Body.Close()
-		return nil, err
-	}
-
-	// Do whatever we can.
-	// If we have an expected size and Content-Length doesn't match, return an error.
-	// If we don't have an expected size and we do have a Content-Length, use Content-Length.
-	if hsize := resp.ContentLength; hsize != -1 {
-		if size == verify.SizeUnknown {
-			size = hsize
-		} else if hsize != size {
-			return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size)
-		}
-	}
-
-	return verify.ReadCloser(resp.Body, size, h)
-}
-
-func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) {
-	u := f.url("blobs", h.String())
-	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	resp, err := f.Client.Do(req.WithContext(f.context))
-	if err != nil {
-		return nil, redact.Error(err)
-	}
-
-	if err := transport.CheckError(resp, http.StatusOK); err != nil {
-		resp.Body.Close()
-		return nil, err
-	}
-
-	return resp, nil
-}
-
-func (f *fetcher) blobExists(h v1.Hash) (bool, error) {
-	u := f.url("blobs", h.String())
-	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
-	if err != nil {
-		return false, err
-	}
-
-	resp, err := f.Client.Do(req.WithContext(f.context))
-	if err != nil {
-		return false, redact.Error(err)
-	}
-	defer resp.Body.Close()
-
-	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
-		return false, err
-	}
-
-	return resp.StatusCode == http.StatusOK, nil
-}
-
-// If filter applied, filter out by artifactType.
-// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers
-func filterReferrersResponse(filter map[string]string, origIndex *v1.IndexManifest) *v1.IndexManifest {
-	newIndex := origIndex
-	if filter == nil {
-		return newIndex
-	}
-	if v, ok := filter["artifactType"]; ok {
-		tmp := []v1.Descriptor{}
-		for _, desc := range newIndex.Manifests {
-			if desc.ArtifactType == v {
-				tmp = append(tmp, desc)
-			}
-		}
-		newIndex.Manifests = tmp
-	}
-	return newIndex
-}
vendor/github.com/google/go-containerregistry/pkg/v1/remote/fetcher.go (generated, vendored, new file, 318 lines)

@@ -0,0 +1,318 @@
// Copyright 2023 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"

	"github.com/google/go-containerregistry/internal/redact"
	"github.com/google/go-containerregistry/internal/verify"
	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func fetcherFromWriter(w *writer) *fetcher {
	return &fetcher{
		target: w.repo,
		client: w.client,
	}
}

// fetcher implements methods for reading from a registry.
type fetcher struct {
	target resource
	client *http.Client
}

func makeFetcher(ctx context.Context, target resource, o *options) (*fetcher, error) {
	auth := o.auth
	if o.keychain != nil {
		kauth, err := o.keychain.Resolve(target)
		if err != nil {
			return nil, err
		}
		auth = kauth
	}

	reg, ok := target.(name.Registry)
	if !ok {
		repo, ok := target.(name.Repository)
		if !ok {
			return nil, fmt.Errorf("unexpected resource: %T", target)
		}
		reg = repo.Registry
	}

	tr, err := transport.NewWithContext(ctx, reg, auth, o.transport, []string{target.Scope(transport.PullScope)})
	if err != nil {
		return nil, err
	}
	return &fetcher{
		target: target,
		client: &http.Client{Transport: tr},
	}, nil
}

func (f *fetcher) Do(req *http.Request) (*http.Response, error) {
	return f.client.Do(req)
}

type resource interface {
	Scheme() string
	RegistryStr() string
	Scope(string) string

	authn.Resource
}

// url returns a url.Url for the specified path in the context of this remote image reference.
func (f *fetcher) url(resource, identifier string) url.URL {
	u := url.URL{
		Scheme: f.target.Scheme(),
		Host:   f.target.RegistryStr(),
		// Default path if this is not a repository.
		Path: "/v2/_catalog",
	}
	if repo, ok := f.target.(name.Repository); ok {
		u.Path = fmt.Sprintf("/v2/%s/%s/%s", repo.RepositoryStr(), resource, identifier)
	}
	return u
}

func (f *fetcher) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) {
	b, desc, err := f.fetchManifest(ctx, ref, acceptable)
	if err != nil {
		return nil, err
	}
	return &Descriptor{
		ref:        ref,
		ctx:        ctx,
		fetcher:    *f,
		Manifest:   b,
		Descriptor: *desc,
		platform:   platform,
	}, nil
}

func (f *fetcher) fetchManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
	u := f.url("manifests", ref.Identifier())
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, nil, err
	}
	accept := []string{}
	for _, mt := range acceptable {
		accept = append(accept, string(mt))
	}
	req.Header.Set("Accept", strings.Join(accept, ","))

	resp, err := f.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, nil, err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		return nil, nil, err
	}

	manifest, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, err
	}

	digest, size, err := v1.SHA256(bytes.NewReader(manifest))
	if err != nil {
		return nil, nil, err
	}

	mediaType := types.MediaType(resp.Header.Get("Content-Type"))
	contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
	if err == nil && mediaType == types.DockerManifestSchema1Signed {
		// If we can parse the digest from the header, and it's a signed schema 1
		// manifest, let's use that for the digest to appease older registries.
		digest = contentDigest
	}

	// Validate the digest matches what we asked for, if pulling by digest.
	if dgst, ok := ref.(name.Digest); ok {
		if digest.String() != dgst.DigestStr() {
			return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref)
		}
	}

	var artifactType string
	mf, _ := v1.ParseManifest(bytes.NewReader(manifest))
	// Failing to parse as a manifest should just be ignored.
	// The manifest might not be valid, and that's okay.
	if mf != nil && !mf.Config.MediaType.IsConfig() {
		artifactType = string(mf.Config.MediaType)
	}

	// Do nothing for tags; I give up.
	//
	// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
	// but so many registries implement this incorrectly that it's not worth checking.
	//
	// For reference:
	// https://github.com/GoogleContainerTools/kaniko/issues/298

	// Return all this info since we have to calculate it anyway.
	desc := v1.Descriptor{
		Digest:       digest,
		Size:         size,
		MediaType:    mediaType,
		ArtifactType: artifactType,
	}

	return manifest, &desc, nil
}

func (f *fetcher) headManifest(ctx context.Context, ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
	u := f.url("manifests", ref.Identifier())
	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return nil, err
	}
	accept := []string{}
	for _, mt := range acceptable {
		accept = append(accept, string(mt))
	}
	req.Header.Set("Accept", strings.Join(accept, ","))

	resp, err := f.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		return nil, err
	}

	mth := resp.Header.Get("Content-Type")
	if mth == "" {
		return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String())
	}
	mediaType := types.MediaType(mth)

	size := resp.ContentLength
	if size == -1 {
		return nil, fmt.Errorf("GET %s: response did not include Content-Length header", u.String())
	}

	dh := resp.Header.Get("Docker-Content-Digest")
	if dh == "" {
		return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String())
	}
	digest, err := v1.NewHash(dh)
	if err != nil {
		return nil, err
	}

	// Validate the digest matches what we asked for, if pulling by digest.
	if dgst, ok := ref.(name.Digest); ok {
		if digest.String() != dgst.DigestStr() {
			return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), ref)
		}
	}

	// Return all this info since we have to calculate it anyway.
	return &v1.Descriptor{
		Digest:    digest,
		Size:      size,
		MediaType: mediaType,
	}, nil
}

func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) {
	u := f.url("blobs", h.String())
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := f.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, redact.Error(err)
	}

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		resp.Body.Close()
		return nil, err
	}

	// Do whatever we can.
	// If we have an expected size and Content-Length doesn't match, return an error.
	// If we don't have an expected size and we do have a Content-Length, use Content-Length.
	if hsize := resp.ContentLength; hsize != -1 {
		if size == verify.SizeUnknown {
			size = hsize
		} else if hsize != size {
			return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size)
		}
	}

	return verify.ReadCloser(resp.Body, size, h)
}

func (f *fetcher) headBlob(ctx context.Context, h v1.Hash) (*http.Response, error) {
	u := f.url("blobs", h.String())
	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return nil, err
	}

	resp, err := f.client.Do(req.WithContext(ctx))
	if err != nil {
		return nil, redact.Error(err)
	}

	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		resp.Body.Close()
		return nil, err
	}

	return resp, nil
}

func (f *fetcher) blobExists(ctx context.Context, h v1.Hash) (bool, error) {
	u := f.url("blobs", h.String())
	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
	if err != nil {
		return false, err
	}

	resp, err := f.client.Do(req.WithContext(ctx))
	if err != nil {
		return false, redact.Error(err)
	}
	defer resp.Body.Close()

	if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
		return false, err
	}

	return resp.StatusCode == http.StatusOK, nil
}
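What fetchManifest does on the wire is plain distribution-API traffic: GET /v2/&lt;repo&gt;/manifests/&lt;ref&gt; with the acceptable media types in the Accept header, then a digest check over the body. A standalone sketch against a local registry (the localhost:5000 host and repo name are assumptions for illustration):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:5000/v2/my-repo/manifests/latest", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Same idea as the acceptable []types.MediaType loop above.
	req.Header.Set("Accept",
		"application/vnd.oci.image.manifest.v1+json,"+
			"application/vnd.docker.distribution.manifest.v2+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// The library verifies this digest against the requested one when
	// pulling by digest; here we just print it.
	fmt.Printf("sha256:%x %s\n", sha256.Sum256(body), resp.Header.Get("Content-Type"))
}
```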
@@ -16,6 +16,7 @@ package remote

import (
	"bytes"
+	"context"
	"io"
	"net/http"
	"net/url"
@@ -37,7 +38,9 @@ var acceptableImageMediaTypes = []types.MediaType{

// remoteImage accesses an image from a remote registry
type remoteImage struct {
-	fetcher
+	fetcher      fetcher
+	ref          name.Reference
+	ctx          context.Context
	manifestLock sync.Mutex // Protects manifest
	manifest     []byte
	configLock   sync.Mutex // Protects config
@@ -84,7 +87,7 @@ func (r *remoteImage) RawManifest() ([]byte, error) {
	// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
	// do type-checking via remote.Descriptor. I've left this here for tests that
	// directly instantiate a remoteImage.
-	manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes)
+	manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableImageMediaTypes)
	if err != nil {
		return nil, err
	}
@@ -117,7 +120,7 @@ func (r *remoteImage) RawConfigFile() ([]byte, error) {
		return r.config, nil
	}

-	body, err := r.fetchBlob(r.context, m.Config.Size, m.Config.Digest)
+	body, err := r.fetcher.fetchBlob(r.ctx, m.Config.Size, m.Config.Digest)
	if err != nil {
		return nil, err
	}
@@ -139,9 +142,26 @@ func (r *remoteImage) Descriptor() (*v1.Descriptor, error) {
	return r.descriptor, err
}

+func (r *remoteImage) ConfigLayer() (v1.Layer, error) {
+	if _, err := r.RawManifest(); err != nil {
+		return nil, err
+	}
+	m, err := partial.Manifest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return partial.CompressedToLayer(&remoteImageLayer{
+		ri:     r,
+		ctx:    r.ctx,
+		digest: m.Config.Digest,
+	})
+}
+
// remoteImageLayer implements partial.CompressedLayer
type remoteImageLayer struct {
	ri     *remoteImage
+	ctx    context.Context
	digest v1.Hash
}
@@ -152,7 +172,7 @@ func (rl *remoteImageLayer) Digest() (v1.Hash, error) {

// Compressed implements partial.CompressedLayer
func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
-	urls := []url.URL{rl.ri.url("blobs", rl.digest.String())}
+	urls := []url.URL{rl.ri.fetcher.url("blobs", rl.digest.String())}

	// Add alternative layer sources from URLs (usually none).
	d, err := partial.BlobDescriptor(rl, rl.digest)
@@ -165,7 +185,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
	}

	// We don't want to log binary layers -- this can break terminals.
-	ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs")
+	ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs")

	for _, s := range d.URLs {
		u, err := url.Parse(s)
@@ -186,7 +206,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
		return nil, err
	}

-	resp, err := rl.ri.Client.Do(req.WithContext(ctx))
+	resp, err := rl.ri.fetcher.Do(req.WithContext(ctx))
	if err != nil {
		lastErr = err
		continue
@@ -244,13 +264,14 @@ func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) {

// See partial.Exists.
func (rl *remoteImageLayer) Exists() (bool, error) {
-	return rl.ri.blobExists(rl.digest)
+	return rl.ri.fetcher.blobExists(rl.ri.ctx, rl.digest)
}

// LayerByDigest implements partial.CompressedLayer
func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) {
	return &remoteImageLayer{
		ri:     r,
+		ctx:    r.ctx,
		digest: h,
	}, nil
}
@@ -16,6 +16,7 @@ package remote

import (
	"bytes"
+	"context"
	"fmt"
	"sync"

@@ -33,7 +34,9 @@ var acceptableIndexMediaTypes = []types.MediaType{

// remoteIndex accesses an index from a remote registry
type remoteIndex struct {
-	fetcher
+	fetcher      fetcher
+	ref          name.Reference
+	ctx          context.Context
	manifestLock sync.Mutex // Protects manifest
	manifest     []byte
	mediaType    types.MediaType
@@ -75,7 +78,7 @@ func (r *remoteIndex) RawManifest() ([]byte, error) {
	// NOTE(jonjohnsonjr): We should never get here because the public entrypoints
	// do type-checking via remote.Descriptor. I've left this here for tests that
	// directly instantiate a remoteIndex.
-	manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes)
+	manifest, desc, err := r.fetcher.fetchManifest(r.ctx, r.ref, acceptableIndexMediaTypes)
	if err != nil {
		return nil, err
	}
@@ -133,6 +136,7 @@ func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
		if h == childDesc.Digest {
			l, err := partial.CompressedToLayer(&remoteLayer{
				fetcher: r.fetcher,
+				ctx:     r.ctx,
				digest:  h,
			})
			if err != nil {
@@ -140,47 +144,13 @@ func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) {
			}
			return &MountableLayer{
				Layer:     l,
-				Reference: r.Ref.Context().Digest(h.String()),
+				Reference: r.ref.Context().Digest(h.String()),
			}, nil
		}
	}
	return nil, fmt.Errorf("layer not found: %s", h)
}

-// Experiment with a better API for v1.ImageIndex. We might want to move this
-// to partial?
-func (r *remoteIndex) Manifests() ([]partial.Describable, error) {
-	m, err := r.IndexManifest()
-	if err != nil {
-		return nil, err
-	}
-	manifests := []partial.Describable{}
-	for _, desc := range m.Manifests {
-		switch {
-		case desc.MediaType.IsImage():
-			img, err := r.Image(desc.Digest)
-			if err != nil {
-				return nil, err
-			}
-			manifests = append(manifests, img)
-		case desc.MediaType.IsIndex():
-			idx, err := r.ImageIndex(desc.Digest)
-			if err != nil {
-				return nil, err
-			}
-			manifests = append(manifests, idx)
-		default:
-			layer, err := r.Layer(desc.Digest)
-			if err != nil {
-				return nil, err
-			}
-			manifests = append(manifests, layer)
-		}
-	}
-
-	return manifests, nil
-}
-
func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
	desc, err := r.childByPlatform(platform)
	if err != nil {
@@ -216,7 +186,7 @@ func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) {
			return r.childDescriptor(childDesc, platform)
		}
	}
-	return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.Ref)
+	return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.ref)
}

func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {
@@ -229,12 +199,12 @@ func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) {
			return r.childDescriptor(childDesc, defaultPlatform)
		}
	}
-	return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref)
+	return nil, fmt.Errorf("no child with digest %s in index %s", h, r.ref)
}

// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option.
func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) {
-	ref := r.Ref.Context().Digest(child.Digest.String())
+	ref := r.ref.Context().Digest(child.Digest.String())
	var (
		manifest []byte
		err      error
|
@ -245,7 +215,7 @@ func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform)
|
|||
}
|
||||
manifest = child.Data
|
||||
} else {
|
||||
manifest, _, err = r.fetchManifest(ref, []types.MediaType{child.MediaType})
|
||||
manifest, _, err = r.fetcher.fetchManifest(r.ctx, ref, []types.MediaType{child.MediaType})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@@ -261,11 +231,9 @@ func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) {
	}

	return &Descriptor{
-		fetcher: fetcher{
-			Ref:     ref,
-			Client:  r.Client,
-			context: r.context,
-		},
+		ref:        ref,
+		ctx:        r.ctx,
+		fetcher:    r.fetcher,
		Manifest:   manifest,
		Descriptor: child,
		platform:   platform,
@ -15,32 +15,33 @@
package remote

import (
"context"
"io"

"github.com/google/go-containerregistry/internal/redact"
"github.com/google/go-containerregistry/internal/verify"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
)

// remoteLayer implements partial.CompressedLayer
type remoteLayer struct {
fetcher
digest v1.Hash
ctx context.Context
fetcher fetcher
digest v1.Hash
}

// Compressed implements partial.CompressedLayer
func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
// We don't want to log binary layers -- this can break terminals.
ctx := redact.NewContext(rl.context, "omitting binary blobs from logs")
return rl.fetchBlob(ctx, verify.SizeUnknown, rl.digest)
ctx := redact.NewContext(rl.ctx, "omitting binary blobs from logs")
return rl.fetcher.fetchBlob(ctx, verify.SizeUnknown, rl.digest)
}

// Size implements partial.CompressedLayer
func (rl *remoteLayer) Size() (int64, error) {
resp, err := rl.headBlob(rl.digest)
resp, err := rl.fetcher.headBlob(rl.ctx, rl.digest)
if err != nil {
return -1, err
}
@ -60,7 +61,7 @@ func (rl *remoteLayer) MediaType() (types.MediaType, error) {

// See partial.Exists.
func (rl *remoteLayer) Exists() (bool, error) {
return rl.blobExists(rl.digest)
return rl.fetcher.blobExists(rl.ctx, rl.digest)
}

// Layer reads the given blob reference from a registry as a Layer. A blob
@ -68,27 +69,9 @@ func (rl *remoteLayer) Exists() (bool, error) {
// digest of the blob to be read and the repository portion is the repo where
// that blob lives.
func Layer(ref name.Digest, options ...Option) (v1.Layer, error) {
o, err := makeOptions(ref.Context(), options...)
o, err := makeOptions(options...)
if err != nil {
return nil, err
}
f, err := makeFetcher(ref, o)
if err != nil {
return nil, err
}
h, err := v1.NewHash(ref.Identifier())
if err != nil {
return nil, err
}
l, err := partial.CompressedToLayer(&remoteLayer{
fetcher: *f,
digest: h,
})
if err != nil {
return nil, err
}
return &MountableLayer{
Layer: l,
Reference: ref,
}, nil
return newPuller(o).Layer(o.context, ref)
}
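For orientation, the new Layer entry point above delegates auth, transport setup, and fetching to a Puller. A minimal sketch of calling it; the registry host and digest below are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Hypothetical digest reference; remote.Layer requires a name.Digest.
	ref, err := name.NewDigest("registry.example.com/my/repo@sha256:" +
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		log.Fatal(err)
	}
	layer, err := remote.Layer(ref)
	if err != nil {
		log.Fatal(err)
	}
	size, err := layer.Size() // resolved via a HEAD request against the blob
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed size:", size)
}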
@ -26,11 +26,6 @@ import (
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

type tags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}

// ListWithContext calls List with the given context.
//
// Deprecated: Use List and WithContext. This will be removed in a future release.
@ -41,73 +36,65 @@ func ListWithContext(ctx context.Context, repo name.Repository, options ...Optio
// List calls /tags/list for the given repository, returning the list of tags
// in the "tags" property.
func List(repo name.Repository, options ...Option) ([]string, error) {
o, err := makeOptions(repo, options...)
o, err := makeOptions(options...)
if err != nil {
return nil, err
}
scopes := []string{repo.Scope(transport.PullScope)}
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
return newPuller(o).List(o.context, repo)
}

type Tags struct {
Name string `json:"name"`
Tags []string `json:"tags"`
Next string `json:"next,omitempty"`
}

func (f *fetcher) listPage(ctx context.Context, repo name.Repository, next string, pageSize int) (*Tags, error) {
if next == "" {
uri := &url.URL{
Scheme: repo.Scheme(),
Host: repo.RegistryStr(),
Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
}
if pageSize > 0 {
uri.RawQuery = fmt.Sprintf("n=%d", pageSize)
}
next = uri.String()
}

req, err := http.NewRequestWithContext(ctx, "GET", next, nil)
if err != nil {
return nil, err
}

uri := &url.URL{
Scheme: repo.Registry.Scheme(),
Host: repo.Registry.RegistryStr(),
Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()),
resp, err := f.client.Do(req)
if err != nil {
return nil, err
}

if o.pageSize > 0 {
uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize)
if err := transport.CheckError(resp, http.StatusOK); err != nil {
return nil, err
}

client := http.Client{Transport: tr}
tagList := []string{}
parsed := tags{}

// get responses until there is no next page
for {
select {
case <-o.context.Done():
return nil, o.context.Err()
default:
}

req, err := http.NewRequestWithContext(o.context, "GET", uri.String(), nil)
if err != nil {
return nil, err
}

resp, err := client.Do(req)
if err != nil {
return nil, err
}

if err := transport.CheckError(resp, http.StatusOK); err != nil {
return nil, err
}

if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
return nil, err
}

if err := resp.Body.Close(); err != nil {
return nil, err
}

tagList = append(tagList, parsed.Tags...)

uri, err = getNextPageURL(resp)
if err != nil {
return nil, err
}
// no next page
if uri == nil {
break
}
parsed := Tags{}
if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil {
return nil, err
}

return tagList, nil
if err := resp.Body.Close(); err != nil {
return nil, err
}

uri, err := getNextPageURL(resp)
if err != nil {
return nil, err
}

if uri != nil {
parsed.Next = uri.String()
}

return &parsed, nil
}

// getNextPageURL checks if there is a Link header in a http.Response which
@ -139,3 +126,27 @@ func getNextPageURL(resp *http.Response) (*url.URL, error) {
linkURL = resp.Request.URL.ResolveReference(linkURL)
return linkURL, nil
}

type Lister struct {
f *fetcher
repo name.Repository
pageSize int

page *Tags
err error

needMore bool
}

func (l *Lister) Next(ctx context.Context) (*Tags, error) {
if l.needMore {
l.page, l.err = l.f.listPage(ctx, l.repo, l.page.Next, l.pageSize)
} else {
l.needMore = true
}
return l.page, l.err
}

func (l *Lister) HasNext() bool {
return l.page != nil && (!l.needMore || l.page.Next != "")
}
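The Lister above replaces the old inline pagination loop in List. A minimal sketch of paginating tags with it, assuming the repository is reachable and anonymous access suffices; the repository name is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ctx := context.Background()
	repo, err := name.NewRepository("registry.example.com/my/repo") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	puller, err := remote.NewPuller()
	if err != nil {
		log.Fatal(err)
	}
	lister, err := puller.Lister(ctx, repo)
	if err != nil {
		log.Fatal(err)
	}
	for lister.HasNext() {
		page, err := lister.Next(ctx) // one /tags/list response per call
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(page.Tags)
	}
}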
@ -15,288 +15,32 @@
package remote

import (
"context"
"fmt"
"net/http"

"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
"golang.org/x/sync/errgroup"
)

// MultiWrite writes the given Images or ImageIndexes to the given refs, as
// efficiently as possible, by deduping shared layer blobs and uploading layers
// in parallel, then uploading all manifests in parallel.
//
// Current limitations:
// - All refs must share the same repository.
// - Images cannot consist of stream.Layers.
func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) {
// Determine the repository being pushed to; if asked to push to
// multiple repositories, give up.
var repo, zero name.Repository
for ref := range m {
if repo == zero {
repo = ref.Context()
} else if ref.Context() != repo {
return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context())
}
}

o, err := makeOptions(repo, options...)
// efficiently as possible, by deduping shared layer blobs while uploading them
// in parallel.
func MultiWrite(todo map[name.Reference]Taggable, options ...Option) (rerr error) {
o, err := makeOptions(options...)
if err != nil {
return err
}
if o.progress != nil {
defer func() { o.progress.Close(rerr) }()
}
p := newPusher(o)

// Collect unique blobs (layers and config blobs).
blobs := map[v1.Hash]v1.Layer{}
newManifests := []map[name.Reference]Taggable{}
// Separate originally requested images and indexes, so we can push images first.
images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{}
for ref, i := range m {
if img, ok := i.(v1.Image); ok {
images[ref] = i
if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil {
return err
}
continue
}
if idx, ok := i.(v1.ImageIndex); ok {
indexes[ref] = i
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts)
if err != nil {
return err
}
continue
}
return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i)
}
g, ctx := errgroup.WithContext(o.context)
g.SetLimit(o.jobs)

// Determine if any of the layers are Mountable, because if so we need
// to request Pull scope too.
ls := []v1.Layer{}
for _, l := range blobs {
ls = append(ls, l)
}
scopes := scopesForUploadingImage(repo, ls)
tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
if err != nil {
return err
}
w := writer{
repo: repo,
client: &http.Client{Transport: tr},
backoff: o.retryBackoff,
predicate: o.retryPredicate,
}

// Collect the total size of blobs and manifests we're about to write.
if o.updates != nil {
w.progress = &progress{updates: o.updates}
w.progress.lastUpdate = &v1.Update{}
defer close(o.updates)
defer func() { _ = w.progress.err(rerr) }()
for _, b := range blobs {
size, err := b.Size()
if err != nil {
return err
}
w.progress.total(size)
}
countManifest := func(t Taggable) error {
b, err := t.RawManifest()
if err != nil {
return err
}
w.progress.total(int64(len(b)))
return nil
}
for _, i := range images {
if err := countManifest(i); err != nil {
return err
}
}
for _, nm := range newManifests {
for _, i := range nm {
if err := countManifest(i); err != nil {
return err
}
}
}
for _, i := range indexes {
if err := countManifest(i); err != nil {
return err
}
}
}

// Upload individual blobs and collect any errors.
blobChan := make(chan v1.Layer, 2*o.jobs)
ctx := o.context
g, gctx := errgroup.WithContext(o.context)
for i := 0; i < o.jobs; i++ {
// Start N workers consuming blobs to upload.
for ref, t := range todo {
ref, t := ref, t
g.Go(func() error {
for b := range blobChan {
if err := w.uploadOne(gctx, b); err != nil {
return err
}
}
return nil
return p.Push(ctx, ref, t)
})
}
g.Go(func() error {
defer close(blobChan)
for _, b := range blobs {
select {
case blobChan <- b:
case <-gctx.Done():
return gctx.Err()
}
}
return nil
})
if err := g.Wait(); err != nil {
return err
}

commitMany := func(ctx context.Context, m map[name.Reference]Taggable) error {
g, ctx := errgroup.WithContext(ctx)
// With all of the constituent elements uploaded, upload the manifests
// to commit the images and indexes, and collect any errors.
type task struct {
i Taggable
ref name.Reference
}
taskChan := make(chan task, 2*o.jobs)
for i := 0; i < o.jobs; i++ {
// Start N workers consuming tasks to upload manifests.
g.Go(func() error {
for t := range taskChan {
if err := w.commitManifest(ctx, t.i, t.ref); err != nil {
return err
}
}
return nil
})
}
go func() {
for ref, i := range m {
taskChan <- task{i, ref}
}
close(taskChan)
}()
return g.Wait()
}
// Push originally requested image manifests. These have no
// dependencies.
if err := commitMany(ctx, images); err != nil {
return err
}
// Push new manifests from lowest levels up.
for i := len(newManifests) - 1; i >= 0; i-- {
if err := commitMany(ctx, newManifests[i]); err != nil {
return err
}
}
// Push originally requested index manifests, which might depend on
// newly discovered manifests.

return commitMany(ctx, indexes)
}

// addIndexBlobs adds blobs to the set of blobs we intend to upload, and
// returns the latest copy of the ordered collection of manifests to upload.
func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) {
if lvl > len(newManifests)-1 {
newManifests = append(newManifests, map[name.Reference]Taggable{})
}

im, err := idx.IndexManifest()
if err != nil {
return nil, err
}
for _, desc := range im.Manifests {
switch desc.MediaType {
case types.OCIImageIndex, types.DockerManifestList:
idx, err := idx.ImageIndex(desc.Digest)
if err != nil {
return nil, err
}
newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts)
if err != nil {
return nil, err
}

// Also track the sub-index manifest to upload later by digest.
newManifests[lvl][repo.Digest(desc.Digest.String())] = idx
case types.OCIManifestSchema1, types.DockerManifestSchema2:
img, err := idx.Image(desc.Digest)
if err != nil {
return nil, err
}
if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil {
return nil, err
}

// Also track the sub-image manifest to upload later by digest.
newManifests[lvl][repo.Digest(desc.Digest.String())] = img
default:
// Workaround for #819.
if wl, ok := idx.(withLayer); ok {
layer, err := wl.Layer(desc.Digest)
if err != nil {
return nil, err
}
if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("unknown media type: %v", desc.MediaType)
}
}
}
return newManifests, nil
}

func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
// Ignore foreign layers.
mt, err := l.MediaType()
if err != nil {
return err
}

if mt.IsDistributable() || allowNondistributableArtifacts {
d, err := l.Digest()
if err != nil {
return err
}

blobs[d] = l
}

return nil
}

func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error {
ls, err := img.Layers()
if err != nil {
return err
}
// Collect all layers.
for _, l := range ls {
if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil {
return err
}
}

// Collect config blob.
cl, err := partial.ConfigLayer(img)
if err != nil {
return err
}
return addLayerBlob(cl, blobs, allowNondistributableArtifacts)
return g.Wait()
}
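The rewritten MultiWrite is now a thin fan-out over Pusher.Push; blob dedup happens inside the pusher rather than in a hand-rolled blob channel. A minimal sketch of using it, with a synthetic image from pkg/v1/random and hypothetical tags:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	img, err := random.Image(1024, 1) // tiny synthetic image for illustration
	if err != nil {
		log.Fatal(err)
	}
	latest, err := name.NewTag("registry.example.com/my/repo:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	v1tag, err := name.NewTag("registry.example.com/my/repo:v1") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	// Both tags share blobs; MultiWrite uploads each blob once.
	if err := remote.MultiWrite(map[name.Reference]remote.Taggable{
		latest: img,
		v1tag:  img,
	}); err != nil {
		log.Fatal(err)
	}
}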
@ -37,16 +37,23 @@ type options struct {
auth authn.Authenticator
keychain authn.Keychain
transport http.RoundTripper
platform v1.Platform
context context.Context
jobs int
userAgent string
allowNondistributableArtifacts bool
updates chan<- v1.Update
pageSize int
progress *progress
retryBackoff Backoff
retryPredicate retry.Predicate
filter map[string]string
retryStatusCodes []int

// Only these options can overwrite Reuse()d options.
platform v1.Platform
pageSize int
filter map[string]string

// Set by Reuse, we currently store one or the other.
puller *Puller
pusher *Pusher
}

var defaultPlatform = v1.Platform{
@ -60,7 +67,7 @@ type Backoff = retry.Backoff
var defaultRetryPredicate retry.Predicate = func(err error) bool {
// Various failure modes here, as we're often reading from and writing to
// the network.
if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) {
if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, net.ErrClosed) {
logs.Warn.Printf("retrying %v", err)
return true
}
@ -83,12 +90,13 @@ var fastBackoff = Backoff{
Steps: 3,
}

var retryableStatusCodes = []int{
var defaultRetryStatusCodes = []int{
http.StatusRequestTimeout,
http.StatusInternalServerError,
http.StatusBadGateway,
http.StatusServiceUnavailable,
http.StatusGatewayTimeout,
499,
}

const (
@ -112,17 +120,20 @@ var DefaultTransport http.RoundTripper = &http.Transport{
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// We usually are dealing with 2 hosts (at most), split MaxIdleConns between them.
MaxIdleConnsPerHost: 50,
}

func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
func makeOptions(opts ...Option) (*options, error) {
o := &options{
transport: DefaultTransport,
platform: defaultPlatform,
context: context.Background(),
jobs: defaultJobs,
pageSize: defaultPageSize,
retryPredicate: defaultRetryPredicate,
retryBackoff: defaultRetryBackoff,
transport: DefaultTransport,
platform: defaultPlatform,
context: context.Background(),
jobs: defaultJobs,
pageSize: defaultPageSize,
retryPredicate: defaultRetryPredicate,
retryBackoff: defaultRetryBackoff,
retryStatusCodes: defaultRetryStatusCodes,
}

for _, option := range opts {
@ -136,12 +147,6 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
// It is a better experience to explicitly tell a caller their auth is misconfigured
// than potentially fail silently when the correct auth is overridden by option misuse.
return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both")
case o.keychain != nil:
auth, err := o.keychain.Resolve(target)
if err != nil {
return nil, err
}
o.auth = auth
case o.auth == nil:
o.auth = authn.Anonymous
}
@ -157,7 +162,7 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
}

// Wrap the transport in something that can retry network flakes.
o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...))
o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...))

// Wrap this last to prevent transport.New from double-wrapping.
if o.userAgent != "" {
@ -273,7 +278,8 @@ func WithNondistributable(o *options) error {
// should provide a buffered channel to avoid potential deadlocks.
func WithProgress(updates chan<- v1.Update) Option {
return func(o *options) error {
o.updates = updates
o.progress = &progress{updates: updates}
o.progress.lastUpdate = &v1.Update{}
return nil
}
}
@ -305,6 +311,14 @@ func WithRetryPredicate(predicate retry.Predicate) Option {
}
}

// WithRetryStatusCodes sets which http response codes will be retried.
func WithRetryStatusCodes(codes ...int) Option {
return func(o *options) error {
o.retryStatusCodes = codes
return nil
}
}
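WithRetryStatusCodes is new in this release; previously the retryable set was the package-level retryableStatusCodes and could not be overridden. A sketch of overriding it per call, with a hypothetical reference:

package main

import (
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/my/repo:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	// Retry 429s too, not just the default timeout/5xx-style codes.
	_, err = remote.Image(ref, remote.WithRetryStatusCodes(
		http.StatusTooManyRequests,
		http.StatusInternalServerError,
		http.StatusBadGateway,
		http.StatusServiceUnavailable,
	))
	if err != nil {
		log.Fatal(err)
	}
}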

// WithFilter sets the filter querystring for HTTP operations.
func WithFilter(key string, value string) Option {
return func(o *options) error {
@ -315,3 +329,20 @@ func WithFilter(key string, value string) Option {
return nil
}
}

// Reuse takes a Puller or Pusher and reuses it for remote interactions
// rather than starting from a clean slate. For example, it will reuse token exchanges
// when possible and avoid sending redundant HEAD requests.
//
// Reuse will take precedence over other options passed to most remote functions because
// most options deal with setting up auth and transports, which Reuse intentionally skips.
func Reuse[I *Puller | *Pusher](i I) Option {
return func(o *options) error {
if puller, ok := any(i).(*Puller); ok {
o.puller = puller
} else if pusher, ok := any(i).(*Pusher); ok {
o.pusher = pusher
}
return nil
}
}
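Reuse is the generic-typed glue between the package-level helpers and the new Puller/Pusher types: it lets one-off calls piggyback on an existing client's token exchange. A sketch, assuming default keychain auth and a hypothetical reference:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	puller, err := remote.NewPuller(remote.WithAuthFromKeychain(authn.DefaultKeychain))
	if err != nil {
		log.Fatal(err)
	}
	ref, err := name.ParseReference("registry.example.com/my/repo:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	// Both calls reuse the puller's transport and token instead of re-authenticating.
	if _, err := remote.Get(ref, remote.Reuse(puller)); err != nil {
		log.Fatal(err)
	}
	if _, err := remote.Head(ref, remote.Reuse(puller)); err != nil {
		log.Fatal(err)
	}
}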
@ -29,6 +29,8 @@ type progress struct {
}

func (p *progress) total(delta int64) {
p.Lock()
defer p.Unlock()
atomic.AddInt64(&p.lastUpdate.Total, delta)
}

@ -48,6 +50,11 @@ func (p *progress) err(err error) error {
return err
}

func (p *progress) Close(err error) {
_ = p.err(err)
close(p.updates)
}

type progressReader struct {
rc io.ReadCloser
vendor/github.com/google/go-containerregistry/pkg/v1/remote/puller.go (new file, 238 lines; generated, vendored)
@ -0,0 +1,238 @@
// Copyright 2023 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
"context"
"sync"

"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
)

type Puller struct {
o *options

// map[resource]*reader
readers sync.Map
}

func NewPuller(options ...Option) (*Puller, error) {
o, err := makeOptions(options...)
if err != nil {
return nil, err
}

return newPuller(o), nil
}

func newPuller(o *options) *Puller {
if o.puller != nil {
return o.puller
}
return &Puller{
o: o,
}
}

type reader struct {
// in
target resource
o *options

// f()
once sync.Once

// out
f *fetcher
err error
}

// this will run once per reader instance
func (r *reader) init(ctx context.Context) error {
r.once.Do(func() {
r.f, r.err = makeFetcher(ctx, r.target, r.o)
})
return r.err
}

func (p *Puller) fetcher(ctx context.Context, target resource) (*fetcher, error) {
// If we are Reuse()ing a Pusher, we want to use that for token handshakes and scopes,
// but we want to do read requests via a fetcher{}.
//
// TODO(jonjohnsonjr): Unify fetcher, writer, and repoWriter.
if p.o.pusher != nil {
if repo, ok := target.(name.Repository); ok {
w, err := p.o.pusher.writer(ctx, repo, p.o)
if err == nil {
return fetcherFromWriter(w.w), nil
}
logs.Debug.Printf("reusing Pusher failed: %v", err)
}
}

// Normal path for NewPuller.
v, _ := p.readers.LoadOrStore(target, &reader{
target: target,
o: p.o,
})
rr := v.(*reader)
return rr.f, rr.init(ctx)
}

// Head is like remote.Head, but avoids re-authenticating when possible.
func (p *Puller) Head(ctx context.Context, ref name.Reference) (*v1.Descriptor, error) {
f, err := p.fetcher(ctx, ref.Context())
if err != nil {
return nil, err
}

return f.headManifest(ctx, ref, allManifestMediaTypes)
}

// Get is like remote.Get, but avoids re-authenticating when possible.
func (p *Puller) Get(ctx context.Context, ref name.Reference) (*Descriptor, error) {
return p.get(ctx, ref, allManifestMediaTypes, p.o.platform)
}

func (p *Puller) get(ctx context.Context, ref name.Reference, acceptable []types.MediaType, platform v1.Platform) (*Descriptor, error) {
f, err := p.fetcher(ctx, ref.Context())
if err != nil {
return nil, err
}
return f.get(ctx, ref, acceptable, platform)
}

// Layer is like remote.Layer, but avoids re-authenticating when possible.
func (p *Puller) Layer(ctx context.Context, ref name.Digest) (v1.Layer, error) {
f, err := p.fetcher(ctx, ref.Context())
if err != nil {
return nil, err
}

h, err := v1.NewHash(ref.Identifier())
if err != nil {
return nil, err
}
l, err := partial.CompressedToLayer(&remoteLayer{
fetcher: *f,
ctx: ctx,
digest: h,
})
if err != nil {
return nil, err
}
return &MountableLayer{
Layer: l,
Reference: ref,
}, nil
}

// List lists tags in a repo and handles pagination, returning the full list of tags.
func (p *Puller) List(ctx context.Context, repo name.Repository) ([]string, error) {
lister, err := p.Lister(ctx, repo)
if err != nil {
return nil, err
}

tagList := []string{}
for lister.HasNext() {
tags, err := lister.Next(ctx)
if err != nil {
return nil, err
}
tagList = append(tagList, tags.Tags...)
}

return tagList, nil
}

// Lister lists tags in a repo and returns a Lister for paginating through the results.
func (p *Puller) Lister(ctx context.Context, repo name.Repository) (*Lister, error) {
return p.lister(ctx, repo, p.o.pageSize)
}

func (p *Puller) lister(ctx context.Context, repo name.Repository, pageSize int) (*Lister, error) {
f, err := p.fetcher(ctx, repo)
if err != nil {
return nil, err
}
page, err := f.listPage(ctx, repo, "", pageSize)
if err != nil {
return nil, err
}
return &Lister{
f: f,
repo: repo,
pageSize: pageSize,
page: page,
err: err,
}, nil
}

// Catalog lists repos in a registry and handles pagination, returning the full list of repos.
func (p *Puller) Catalog(ctx context.Context, reg name.Registry) ([]string, error) {
return p.catalog(ctx, reg, p.o.pageSize)
}

func (p *Puller) catalog(ctx context.Context, reg name.Registry, pageSize int) ([]string, error) {
catalogger, err := p.catalogger(ctx, reg, pageSize)
if err != nil {
return nil, err
}
repoList := []string{}
for catalogger.HasNext() {
repos, err := catalogger.Next(ctx)
if err != nil {
return nil, err
}
repoList = append(repoList, repos.Repos...)
}
return repoList, nil
}

// Catalogger lists repos in a registry and returns a Catalogger for paginating through the results.
func (p *Puller) Catalogger(ctx context.Context, reg name.Registry) (*Catalogger, error) {
return p.catalogger(ctx, reg, p.o.pageSize)
}

func (p *Puller) catalogger(ctx context.Context, reg name.Registry, pageSize int) (*Catalogger, error) {
f, err := p.fetcher(ctx, reg)
if err != nil {
return nil, err
}
page, err := f.catalogPage(ctx, reg, "", pageSize)
if err != nil {
return nil, err
}
return &Catalogger{
f: f,
reg: reg,
pageSize: pageSize,
page: page,
err: err,
}, nil
}

func (p *Puller) referrers(ctx context.Context, d name.Digest, filter map[string]string) (v1.ImageIndex, error) {
f, err := p.fetcher(ctx, d.Context())
if err != nil {
return nil, err
}
return f.fetchReferrers(ctx, filter, d)
}
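Puller caches one authenticated fetcher per repository (the readers sync.Map keyed by resource), so repeated reads against the same repo skip the token handshake. A sketch of typical use, with a hypothetical reference:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ctx := context.Background()
	puller, err := remote.NewPuller()
	if err != nil {
		log.Fatal(err)
	}
	ref, err := name.ParseReference("registry.example.com/my/repo:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	// HEAD, then GET; the second call reuses the first call's auth.
	desc, err := puller.Head(ctx, ref)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(desc.Digest)
	if _, err := puller.Get(ctx, ref); err != nil {
		log.Fatal(err)
	}
}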
vendor/github.com/google/go-containerregistry/pkg/v1/remote/pusher.go (new file, 548 lines; generated, vendored)
@ -0,0 +1,548 @@
// Copyright 2023 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
"bytes"
"context"
"errors"
"fmt"
"net/http"
"net/url"
"sync"

"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/types"
"golang.org/x/sync/errgroup"
)

type manifest interface {
Taggable
partial.Describable
}

// key is either v1.Hash or v1.Layer (for stream.Layer)
type workers struct {
// map[v1.Hash|v1.Layer]*sync.Once
onces sync.Map

// map[v1.Hash|v1.Layer]error
errors sync.Map
}

func nop() error {
return nil
}

func (w *workers) err(digest v1.Hash) error {
v, ok := w.errors.Load(digest)
if !ok || v == nil {
return nil
}
return v.(error)
}

func (w *workers) Do(digest v1.Hash, f func() error) error {
// We don't care if it was loaded or not because the sync.Once will do it for us.
once, _ := w.onces.LoadOrStore(digest, &sync.Once{})

once.(*sync.Once).Do(func() {
w.errors.Store(digest, f())
})

err := w.err(digest)
if err != nil {
// Allow this to be retried by another caller.
w.onces.Delete(digest)
}
return err
}

func (w *workers) Stream(layer v1.Layer, f func() error) error {
// We don't care if it was loaded or not because the sync.Once will do it for us.
once, _ := w.onces.LoadOrStore(layer, &sync.Once{})

once.(*sync.Once).Do(func() {
w.errors.Store(layer, f())
})

v, ok := w.errors.Load(layer)
if !ok || v == nil {
return nil
}

return v.(error)
}
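The workers type above is a keyed singleflight: one sync.Once per blob digest collapses concurrent uploads of the same blob, and a failure deletes the Once so a later caller can retry. A standalone sketch of the same pattern (illustrative only; the names here are not part of the library's public API):

package main

import (
	"fmt"
	"sync"
)

// onceMap deduplicates work by key and allows retries after failures.
type onceMap struct {
	onces  sync.Map // map[string]*sync.Once
	errors sync.Map // map[string]error
}

func (m *onceMap) Do(key string, f func() error) error {
	once, _ := m.onces.LoadOrStore(key, &sync.Once{})
	once.(*sync.Once).Do(func() {
		m.errors.Store(key, f())
	})
	if v, ok := m.errors.Load(key); ok && v != nil {
		// Drop the Once so a later caller can retry this key.
		m.onces.Delete(key)
		return v.(error)
	}
	return nil
}

func main() {
	var m onceMap
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All three goroutines share a single execution of f.
			_ = m.Do("sha256:abc", func() error {
				fmt.Println("uploading once")
				return nil
			})
		}()
	}
	wg.Wait()
}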

type Pusher struct {
o *options

// map[name.Repository]*repoWriter
writers sync.Map
}

func NewPusher(options ...Option) (*Pusher, error) {
o, err := makeOptions(options...)
if err != nil {
return nil, err
}

return newPusher(o), nil
}

func newPusher(o *options) *Pusher {
if o.pusher != nil {
return o.pusher
}
return &Pusher{
o: o,
}
}

func (p *Pusher) writer(ctx context.Context, repo name.Repository, o *options) (*repoWriter, error) {
v, _ := p.writers.LoadOrStore(repo, &repoWriter{
repo: repo,
o: o,
})
rw := v.(*repoWriter)
return rw, rw.init(ctx)
}

func (p *Pusher) Push(ctx context.Context, ref name.Reference, t Taggable) error {
w, err := p.writer(ctx, ref.Context(), p.o)
if err != nil {
return err
}
return w.writeManifest(ctx, ref, t)
}

func (p *Pusher) Upload(ctx context.Context, repo name.Repository, l v1.Layer) error {
w, err := p.writer(ctx, repo, p.o)
if err != nil {
return err
}
return w.writeLayer(ctx, l)
}

func (p *Pusher) Delete(ctx context.Context, ref name.Reference) error {
w, err := p.writer(ctx, ref.Context(), p.o)
if err != nil {
return err
}

u := url.URL{
Scheme: ref.Context().Registry.Scheme(),
Host: ref.Context().RegistryStr(),
Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()),
}

req, err := http.NewRequest(http.MethodDelete, u.String(), nil)
if err != nil {
return err
}

resp, err := w.w.client.Do(req.WithContext(ctx))
if err != nil {
return err
}
defer resp.Body.Close()

return transport.CheckError(resp, http.StatusOK, http.StatusAccepted)

// TODO(jason): If the manifest had a `subject`, and if the registry
// doesn't support Referrers, update the index pointed to by the
// subject's fallback tag to remove the descriptor for this manifest.
}

type repoWriter struct {
repo name.Repository
o *options
once sync.Once

w *writer
err error

work *workers
}

// this will run once per repoWriter instance
func (rw *repoWriter) init(ctx context.Context) error {
rw.once.Do(func() {
rw.work = &workers{}
rw.w, rw.err = makeWriter(ctx, rw.repo, nil, rw.o)
})
return rw.err
}

func (rw *repoWriter) writeDeps(ctx context.Context, m manifest) error {
if img, ok := m.(v1.Image); ok {
return rw.writeLayers(ctx, img)
}

if idx, ok := m.(v1.ImageIndex); ok {
return rw.writeChildren(ctx, idx)
}

// This has no deps, not an error (e.g. something you want to just PUT).
return nil
}

type describable struct {
desc v1.Descriptor
}

func (d describable) Digest() (v1.Hash, error) {
return d.desc.Digest, nil
}

func (d describable) Size() (int64, error) {
return d.desc.Size, nil
}

func (d describable) MediaType() (types.MediaType, error) {
return d.desc.MediaType, nil
}

type tagManifest struct {
Taggable
partial.Describable
}

func taggableToManifest(t Taggable) (manifest, error) {
if m, ok := t.(manifest); ok {
return m, nil
}

if d, ok := t.(*Descriptor); ok {
if d.MediaType.IsIndex() {
return d.ImageIndex()
}

if d.MediaType.IsImage() {
return d.Image()
}

if d.MediaType.IsSchema1() {
return d.Schema1()
}

return tagManifest{t, describable{d.toDesc()}}, nil
}

desc := v1.Descriptor{
// A reasonable default if Taggable doesn't implement MediaType.
MediaType: types.DockerManifestSchema2,
}

b, err := t.RawManifest()
if err != nil {
return nil, err
}

if wmt, ok := t.(withMediaType); ok {
desc.MediaType, err = wmt.MediaType()
if err != nil {
return nil, err
}
}

desc.Digest, desc.Size, err = v1.SHA256(bytes.NewReader(b))
if err != nil {
return nil, err
}

return tagManifest{t, describable{desc}}, nil
}

func (rw *repoWriter) writeManifest(ctx context.Context, ref name.Reference, t Taggable) error {
m, err := taggableToManifest(t)
if err != nil {
return err
}

needDeps := true

digest, err := m.Digest()
if errors.Is(err, stream.ErrNotComputed) {
if err := rw.writeDeps(ctx, m); err != nil {
return err
}

needDeps = false

digest, err = m.Digest()
if err != nil {
return err
}
} else if err != nil {
return err
}

// This may be a lazy child where we have no ref until digest is computed.
if ref == nil {
ref = rw.repo.Digest(digest.String())
}

// For tags, we want to do this check outside of our Work.Do closure because
// we don't want to dedupe based on the manifest digest.
_, byTag := ref.(name.Tag)
if byTag {
if exists, err := rw.manifestExists(ctx, ref, t); err != nil {
return err
} else if exists {
return nil
}
}

// The following work.Do will get deduped by digest, so it won't happen unless
// this tag happens to be the first commitManifest to run for that digest.
needPut := byTag

if err := rw.work.Do(digest, func() error {
if !byTag {
if exists, err := rw.manifestExists(ctx, ref, t); err != nil {
return err
} else if exists {
return nil
}
}

if needDeps {
if err := rw.writeDeps(ctx, m); err != nil {
return err
}
}

needPut = false
return rw.commitManifest(ctx, ref, m)
}); err != nil {
return err
}

if !needPut {
return nil
}

// Only runs for tags that got deduped by digest.
return rw.commitManifest(ctx, ref, m)
}

func (rw *repoWriter) writeChildren(ctx context.Context, idx v1.ImageIndex) error {
children, err := partial.Manifests(idx)
if err != nil {
return err
}

g, ctx := errgroup.WithContext(ctx)
g.SetLimit(rw.o.jobs)

for _, child := range children {
child := child
if err := rw.writeChild(ctx, child, g); err != nil {
return err
}
}

return g.Wait()
}

func (rw *repoWriter) writeChild(ctx context.Context, child partial.Describable, g *errgroup.Group) error {
switch child := child.(type) {
case v1.ImageIndex:
// For recursive index, we want to do a depth-first launching of goroutines
// to avoid deadlocking.
//
// Note that this is rare, so the impact of this should be really small.
return rw.writeManifest(ctx, nil, child)
case v1.Image:
g.Go(func() error {
return rw.writeManifest(ctx, nil, child)
})
case v1.Layer:
g.Go(func() error {
return rw.writeLayer(ctx, child)
})
default:
// This can't happen.
return fmt.Errorf("encountered unknown child: %T", child)
}
return nil
}

func (rw *repoWriter) manifestExists(ctx context.Context, ref name.Reference, t Taggable) (bool, error) {
f := &fetcher{
target: ref.Context(),
client: rw.w.client,
}

m, err := taggableToManifest(t)
if err != nil {
return false, err
}

digest, err := m.Digest()
if err != nil {
// Possibly due to streaming layers.
return false, nil
}
got, err := f.headManifest(ctx, ref, allManifestMediaTypes)
if err != nil {
var terr *transport.Error
if errors.As(err, &terr) {
if terr.StatusCode == http.StatusNotFound {
return false, nil
}
}

return false, err
}

if digest != got.Digest {
// Mark that we saw this digest in the registry so we don't have to check it again.
rw.work.Do(got.Digest, nop)

return false, nil
}

if tag, ok := ref.(name.Tag); ok {
logs.Progress.Printf("existing manifest: %s@%s", tag.Identifier(), got.Digest)
} else {
logs.Progress.Print("existing manifest: ", got.Digest)
}

return true, nil
}

func (rw *repoWriter) commitManifest(ctx context.Context, ref name.Reference, m manifest) error {
if rw.o.progress != nil {
size, err := m.Size()
if err != nil {
return err
}
rw.o.progress.total(size)
}

return rw.w.commitManifest(ctx, m, ref)
}

func (rw *repoWriter) writeLayers(pctx context.Context, img v1.Image) error {
ls, err := img.Layers()
if err != nil {
return err
}

g, ctx := errgroup.WithContext(pctx)
g.SetLimit(rw.o.jobs)

for _, l := range ls {
l := l

g.Go(func() error {
return rw.writeLayer(ctx, l)
})
}

mt, err := img.MediaType()
if err != nil {
return err
}

if mt.IsSchema1() {
return g.Wait()
}

cl, err := partial.ConfigLayer(img)
if errors.Is(err, stream.ErrNotComputed) {
if err := g.Wait(); err != nil {
return err
}

cl, err := partial.ConfigLayer(img)
if err != nil {
return err
}

return rw.writeLayer(pctx, cl)
} else if err != nil {
return err
}

g.Go(func() error {
return rw.writeLayer(ctx, cl)
})

return g.Wait()
}

func (rw *repoWriter) writeLayer(ctx context.Context, l v1.Layer) error {
// Skip any non-distributable things.
mt, err := l.MediaType()
if err != nil {
return err
}
if !mt.IsDistributable() && !rw.o.allowNondistributableArtifacts {
return nil
}

digest, err := l.Digest()
if err != nil {
if errors.Is(err, stream.ErrNotComputed) {
return rw.lazyWriteLayer(ctx, l)
}
return err
}

return rw.work.Do(digest, func() error {
if rw.o.progress != nil {
size, err := l.Size()
if err != nil {
return err
}
rw.o.progress.total(size)
}
return rw.w.uploadOne(ctx, l)
})
}

func (rw *repoWriter) lazyWriteLayer(ctx context.Context, l v1.Layer) error {
return rw.work.Stream(l, func() error {
if err := rw.w.uploadOne(ctx, l); err != nil {
return err
}

// Mark this upload completed.
digest, err := l.Digest()
if err != nil {
return err
}

rw.work.Do(digest, nop)

if rw.o.progress != nil {
size, err := l.Size()
if err != nil {
return err
}
rw.o.progress.total(size)
}

return nil
})
}
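Pusher mirrors Puller on the write side: one repoWriter per repository, with layer and manifest uploads deduped through workers. A sketch of pushing a synthetic image and a standalone layer; the tag is hypothetical:

package main

import (
	"context"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	ctx := context.Background()
	pusher, err := remote.NewPusher()
	if err != nil {
		log.Fatal(err)
	}
	img, err := random.Image(1024, 1) // synthetic test image
	if err != nil {
		log.Fatal(err)
	}
	tag, err := name.NewTag("registry.example.com/my/repo:latest") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	if err := pusher.Push(ctx, tag, img); err != nil {
		log.Fatal(err)
	}
	// Uploading a bare blob reuses the same authenticated repoWriter.
	layer, err := random.Layer(512, types.OCILayer)
	if err != nil {
		log.Fatal(err)
	}
	if err := pusher.Upload(ctx, tag.Context(), layer); err != nil {
		log.Fatal(err)
	}
}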
@ -15,21 +15,103 @@
package remote

import (
"bytes"
"context"
"errors"
"io"
"net/http"
"strings"

"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/types"
)

// Referrers returns a list of descriptors that refer to the given manifest digest.
//
// The subject manifest doesn't have to exist in the registry for there to be descriptors that refer to it.
func Referrers(d name.Digest, options ...Option) (*v1.IndexManifest, error) {
o, err := makeOptions(d.Context(), options...)
func Referrers(d name.Digest, options ...Option) (v1.ImageIndex, error) {
o, err := makeOptions(options...)
if err != nil {
return nil, err
}
f, err := makeFetcher(d, o)
if err != nil {
return nil, err
}
return f.fetchReferrers(o.context, o.filter, d)
return newPuller(o).referrers(o.context, d, o.filter)
}

// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#referrers-tag-schema
func fallbackTag(d name.Digest) name.Tag {
return d.Context().Tag(strings.Replace(d.DigestStr(), ":", "-", 1))
}

func (f *fetcher) fetchReferrers(ctx context.Context, filter map[string]string, d name.Digest) (v1.ImageIndex, error) {
// Check the Referrers API endpoint first.
u := f.url("referrers", d.DigestStr())
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", string(types.OCIImageIndex))

resp, err := f.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()

if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound, http.StatusBadRequest); err != nil {
return nil, err
}

var b []byte
if resp.StatusCode == http.StatusOK {
b, err = io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
} else {
// The registry doesn't support the Referrers API endpoint, so we'll use the fallback tag scheme.
b, _, err = f.fetchManifest(ctx, fallbackTag(d), []types.MediaType{types.OCIImageIndex})
var terr *transport.Error
if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
// Not found just means there are no attachments yet. Start with an empty manifest.
return empty.Index, nil
} else if err != nil {
return nil, err
}
}

h, sz, err := v1.SHA256(bytes.NewReader(b))
if err != nil {
return nil, err
}
idx := &remoteIndex{
fetcher: *f,
ctx: ctx,
manifest: b,
mediaType: types.OCIImageIndex,
descriptor: &v1.Descriptor{
Digest: h,
MediaType: types.OCIImageIndex,
Size: sz,
},
}
return filterReferrersResponse(filter, idx), nil
}

// If filter applied, filter out by artifactType.
// See https://github.com/opencontainers/distribution-spec/blob/main/spec.md#listing-referrers
func filterReferrersResponse(filter map[string]string, in v1.ImageIndex) v1.ImageIndex {
if filter == nil {
return in
}
v, ok := filter["artifactType"]
if !ok {
return in
}
return mutate.RemoveManifests(in, func(desc v1.Descriptor) bool {
return desc.ArtifactType != v
})
}
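Referrers now returns a v1.ImageIndex rather than a raw *v1.IndexManifest, and falls back to the tag scheme on registries without the Referrers endpoint. A sketch of listing referrers filtered by artifactType; the digest and artifact type below are hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	d, err := name.NewDigest("registry.example.com/my/repo@sha256:" +
		"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
	if err != nil {
		log.Fatal(err)
	}
	idx, err := remote.Referrers(d,
		remote.WithFilter("artifactType", "application/vnd.example.signature"))
	if err != nil {
		log.Fatal(err)
	}
	im, err := idx.IndexManifest()
	if err != nil {
		log.Fatal(err)
	}
	for _, desc := range im.Manifests {
		fmt.Println(desc.Digest, desc.ArtifactType)
	}
}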
vendor/github.com/google/go-containerregistry/pkg/v1/remote/schema1.go (new file, 118 lines; generated, vendored)
@ -0,0 +1,118 @@
// Copyright 2023 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package remote

import (
"bytes"
"context"
"encoding/json"

"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/types"
)

type schema1 struct {
ref name.Reference
ctx context.Context
fetcher fetcher
manifest []byte
mediaType types.MediaType
descriptor *v1.Descriptor
}

func (s *schema1) Layers() ([]v1.Layer, error) {
m := schema1Manifest{}
if err := json.NewDecoder(bytes.NewReader(s.manifest)).Decode(&m); err != nil {
return nil, err
}

layers := []v1.Layer{}
for i := len(m.FSLayers) - 1; i >= 0; i-- {
fsl := m.FSLayers[i]

h, err := v1.NewHash(fsl.BlobSum)
if err != nil {
return nil, err
}
l, err := s.LayerByDigest(h)
if err != nil {
return nil, err
}
layers = append(layers, l)
}

return layers, nil
}

func (s *schema1) MediaType() (types.MediaType, error) {
return s.mediaType, nil
}

func (s *schema1) Size() (int64, error) {
return s.descriptor.Size, nil
}

func (s *schema1) ConfigName() (v1.Hash, error) {
return partial.ConfigName(s)
}

func (s *schema1) ConfigFile() (*v1.ConfigFile, error) {
return nil, newErrSchema1(s.mediaType)
}

func (s *schema1) RawConfigFile() ([]byte, error) {
return []byte("{}"), nil
}

func (s *schema1) Digest() (v1.Hash, error) {
return s.descriptor.Digest, nil
}

func (s *schema1) Manifest() (*v1.Manifest, error) {
return nil, newErrSchema1(s.mediaType)
}

func (s *schema1) RawManifest() ([]byte, error) {
return s.manifest, nil
}

func (s *schema1) LayerByDigest(h v1.Hash) (v1.Layer, error) {
l, err := partial.CompressedToLayer(&remoteLayer{
fetcher: s.fetcher,
ctx: s.ctx,
digest: h,
})
if err != nil {
return nil, err
}
return &MountableLayer{
Layer: l,
Reference: s.ref.Context().Digest(h.String()),
}, nil
}

func (s *schema1) LayerByDiffID(v1.Hash) (v1.Layer, error) {
return nil, newErrSchema1(s.mediaType)
}

type fslayer struct {
BlobSum string `json:"blobSum"`
}

type schema1Manifest struct {
FSLayers []fslayer `json:"fsLayers"`
}
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go (generated, vendored; 27 lines changed)
@ -34,6 +34,9 @@ type Error struct {
Request *http.Request
// The raw body if we couldn't understand it.
rawBody string

// Bit of a hack to make it easier to force a retry.
temporary bool
}

// Check that Error implements error
@ -72,6 +75,10 @@ func (e *Error) responseErr() string {

// Temporary returns whether the request that preceded the error is temporary.
func (e *Error) Temporary() bool {
if e.temporary {
return true
}

if len(e.Errors) == 0 {
_, ok := temporaryStatusCodes[e.StatusCode]
return ok
@ -153,21 +160,37 @@ func CheckError(resp *http.Response, codes ...int) error {
return nil
}
}

b, err := io.ReadAll(resp.Body)
if err != nil {
return err
}

return makeError(resp, b)
}

func makeError(resp *http.Response, body []byte) *Error {
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
structuredError := &Error{}

// This can fail if e.g. the response body is not valid JSON. That's fine,
// we'll construct an appropriate error string from the body and status code.
_ = json.Unmarshal(b, structuredError)
_ = json.Unmarshal(body, structuredError)

structuredError.rawBody = string(b)
structuredError.rawBody = string(body)
structuredError.StatusCode = resp.StatusCode
structuredError.Request = resp.Request

return structuredError
}

func retryError(resp *http.Response) error {
b, err := io.ReadAll(resp.Body)
if err != nil {
return err
}

rerr := makeError(resp, b)
rerr.temporary = true
return rerr
}
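Callers can still distinguish registry errors by status code: CheckError produces a structured *transport.Error that unwraps with errors.As. A sketch of the usual 404 check, with a hypothetical reference:

package main

import (
	"errors"
	"fmt"
	"log"
	"net/http"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	ref, err := name.ParseReference("registry.example.com/my/repo:missing") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	if _, err := remote.Get(ref); err != nil {
		var terr *transport.Error
		if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
			fmt.Println("manifest not found")
			return
		}
		log.Fatal(err)
	}
}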
@ -100,7 +100,7 @@ func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err er
if out != nil {
for _, code := range t.codes {
if out.StatusCode == code {
return CheckError(out)
return retryError(out)
}
}
}
@ -25,17 +25,17 @@ import (
"net/url"
"sort"
"strings"
"sync"

"github.com/google/go-containerregistry/internal/redact"
"github.com/google/go-containerregistry/internal/retry"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/logs"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/partial"
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/types"
"golang.org/x/sync/errgroup"
)

// Taggable is an interface that enables a manifest PUT (e.g. for tagging).
@ -45,137 +45,64 @@ type Taggable interface {

// Write pushes the provided img to the specified image reference.
func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) {
    o, err := makeOptions(ref.Context(), options...)
    o, err := makeOptions(options...)
    if err != nil {
        return err
    }

    var p *progress
    if o.updates != nil {
        p = &progress{updates: o.updates}
        p.lastUpdate = &v1.Update{}
        p.lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts)
        if err != nil {
            return err
        }
        defer close(o.updates)
        defer func() { _ = p.err(rerr) }()
    if o.progress != nil {
        defer func() { o.progress.Close(rerr) }()
    }
    return writeImage(o.context, ref, img, o, p)
}

func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, progress *progress) error {
    ls, err := img.Layers()
    if err != nil {
        return err
    }
    scopes := scopesForUploadingImage(ref.Context(), ls)
    tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
    if err != nil {
        return err
    }
    w := writer{
        repo:      ref.Context(),
        client:    &http.Client{Transport: tr},
        progress:  progress,
        backoff:   o.retryBackoff,
        predicate: o.retryPredicate,
    }

    // Upload individual blobs and collect any errors.
    blobChan := make(chan v1.Layer, 2*o.jobs)
    g, gctx := errgroup.WithContext(ctx)
    for i := 0; i < o.jobs; i++ {
        // Start N workers consuming blobs to upload.
        g.Go(func() error {
            for b := range blobChan {
                if err := w.uploadOne(gctx, b); err != nil {
                    return err
                }
            }
            return nil
        })
    }

    // Upload individual layers in goroutines and collect any errors.
    // If we can dedupe by the layer digest, try to do so. If we can't determine
    // the digest for whatever reason, we can't dedupe and might re-upload.
    g.Go(func() error {
        defer close(blobChan)
        uploaded := map[v1.Hash]bool{}
        for _, l := range ls {
            l := l

            // Handle foreign layers.
            mt, err := l.MediaType()
            if err != nil {
                return err
            }
            if !mt.IsDistributable() && !o.allowNondistributableArtifacts {
                continue
            }

            // Streaming layers calculate their digests while uploading them. Assume
            // an error here indicates we need to upload the layer.
            h, err := l.Digest()
            if err == nil {
                // If we can determine the layer's digest ahead of
                // time, use it to dedupe uploads.
                if uploaded[h] {
                    continue // Already uploading.
                }
                uploaded[h] = true
            }
            select {
            case blobChan <- l:
            case <-gctx.Done():
                return gctx.Err()
            }
        }
        return nil
    })

    if l, err := partial.ConfigLayer(img); err != nil {
        // We can't read the ConfigLayer, possibly because of streaming layers,
        // since the layer DiffIDs haven't been calculated yet. Attempt to wait
        // for the other layers to be uploaded, then try the config again.
        if err := g.Wait(); err != nil {
            return err
        }

        // Now that all the layers are uploaded, try to upload the config file blob.
        l, err := partial.ConfigLayer(img)
        if err != nil {
            return err
        }
        if err := w.uploadOne(ctx, l); err != nil {
            return err
        }
    } else {
        // We *can* read the ConfigLayer, so upload it concurrently with the layers.
        g.Go(func() error {
            return w.uploadOne(gctx, l)
        })

        // Wait for the layers + config.
        if err := g.Wait(); err != nil {
            return err
        }
    }

    // With all of the constituent elements uploaded, upload the manifest
    // to commit the image.
    return w.commitManifest(ctx, img, ref)
    return newPusher(o).Push(o.context, ref, img)
}

// writer writes the elements of an image to a remote image reference.
type writer struct {
    repo name.Repository
    repo      name.Repository
    auth      authn.Authenticator
    transport http.RoundTripper

    client *http.Client

    progress  *progress
    backoff   Backoff
    predicate retry.Predicate

    scopeLock sync.Mutex
    // Keep track of scopes that we have already requested.
    scopeSet map[string]struct{}
    scopes   []string
}

func makeWriter(ctx context.Context, repo name.Repository, ls []v1.Layer, o *options) (*writer, error) {
    auth := o.auth
    if o.keychain != nil {
        kauth, err := o.keychain.Resolve(repo)
        if err != nil {
            return nil, err
        }
        auth = kauth
    }
    scopes := scopesForUploadingImage(repo, ls)
    tr, err := transport.NewWithContext(ctx, repo.Registry, auth, o.transport, scopes)
    if err != nil {
        return nil, err
    }

    scopeSet := map[string]struct{}{}
    for _, scope := range scopes {
        scopeSet[scope] = struct{}{}
    }
    return &writer{
        repo:      repo,
        client:    &http.Client{Transport: tr},
        auth:      auth,
        transport: o.transport,
        progress:  o.progress,
        backoff:   o.retryBackoff,
        predicate: o.retryPredicate,
        scopes:    scopes,
        scopeSet:  scopeSet,
    }, nil
}

// url returns a url.Url for the specified path in the context of this remote image reference.
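The net effect of the hunk above is that Write keeps its signature but delegates the whole upload to the new Pusher. A hedged usage sketch of the unchanged caller-facing API, assuming a throwaway image from pkg/v1/random and a placeholder reference:

package main

import (
    "log"

    "github.com/google/go-containerregistry/pkg/authn"
    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/random"
    "github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
    ref, err := name.ParseReference("registry.example.com/test/img:latest") // placeholder
    if err != nil {
        log.Fatal(err)
    }
    img, err := random.Image(1024, 3) // small throwaway image
    if err != nil {
        log.Fatal(err)
    }
    // Write now routes through newPusher(o).Push internally; options are unchanged.
    if err := remote.Write(ref, img, remote.WithAuthFromKeychain(authn.DefaultKeychain)); err != nil {
        log.Fatal(err)
    }
}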
@ -187,6 +114,34 @@ func (w *writer) url(path string) url.URL {
    }
}

func (w *writer) maybeUpdateScopes(ctx context.Context, ml *MountableLayer) error {
    if ml.Reference.Context().String() == w.repo.String() {
        return nil
    }
    if ml.Reference.Context().Registry.String() != w.repo.Registry.String() {
        return nil
    }

    scope := ml.Reference.Scope(transport.PullScope)

    w.scopeLock.Lock()
    defer w.scopeLock.Unlock()

    if _, ok := w.scopeSet[scope]; !ok {
        w.scopeSet[scope] = struct{}{}
        w.scopes = append(w.scopes, scope)

        logs.Debug.Printf("Refreshing token to add scope %q", scope)
        wt, err := transport.NewWithContext(ctx, w.repo.Registry, w.auth, w.transport, w.scopes)
        if err != nil {
            return err
        }
        w.client = &http.Client{Transport: wt}
    }

    return nil
}

// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence.
func (w *writer) nextLocation(resp *http.Response) (string, error) {
    loc := resp.Header.Get("Location")
@ -228,30 +183,6 @@ func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error)
    return resp.StatusCode == http.StatusOK, nil
}

// checkExistingManifest checks if a manifest exists already in the repository
// by making a HEAD request to the manifest API.
func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) {
    u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String()))

    req, err := http.NewRequest(http.MethodHead, u.String(), nil)
    if err != nil {
        return false, err
    }
    req.Header.Set("Accept", string(mt))

    resp, err := w.client.Do(req.WithContext(ctx))
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()

    if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil {
        return false, err
    }

    return resp.StatusCode == http.StatusOK, nil
}

// initiateUpload initiates the blob upload, which starts with a POST that can
// optionally include the hash of the layer and a list of repositories from
// which that layer might be read. On failure, an error is returned.
@ -279,6 +210,11 @@ func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string)
    req.Header.Set("Content-Type", "application/json")
    resp, err := w.client.Do(req.WithContext(ctx))
    if err != nil {
        if origin != "" && origin != w.repo.RegistryStr() {
            // https://github.com/google/go-containerregistry/issues/1679
            logs.Warn.Printf("retrying without mount: %v", err)
            return w.initiateUpload(ctx, "", "", "")
        }
        return "", false, err
    }
    defer resp.Body.Close()
@ -421,6 +357,9 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
        mount = h.String()
    }
    if ml, ok := l.(*MountableLayer); ok {
        if err := w.maybeUpdateScopes(ctx, ml); err != nil {
            return err
        }
        from = ml.Reference.Context().RepositoryStr()
        origin = ml.Reference.Context().RegistryStr()
    }
@ -474,69 +413,6 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
    return retry.Retry(tryUpload, w.predicate, w.backoff)
}

type withLayer interface {
    Layer(v1.Hash) (v1.Layer, error)
}

func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.ImageIndex, options ...Option) error {
    index, err := ii.IndexManifest()
    if err != nil {
        return err
    }

    o, err := makeOptions(ref.Context(), options...)
    if err != nil {
        return err
    }

    // TODO(#803): Pipe through remote.WithJobs and upload these in parallel.
    for _, desc := range index.Manifests {
        ref := ref.Context().Digest(desc.Digest.String())
        exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType)
        if err != nil {
            return err
        }
        if exists {
            logs.Progress.Print("existing manifest: ", desc.Digest)
            continue
        }

        switch desc.MediaType {
        case types.OCIImageIndex, types.DockerManifestList:
            ii, err := ii.ImageIndex(desc.Digest)
            if err != nil {
                return err
            }
            if err := w.writeIndex(ctx, ref, ii, options...); err != nil {
                return err
            }
        case types.OCIManifestSchema1, types.DockerManifestSchema2:
            img, err := ii.Image(desc.Digest)
            if err != nil {
                return err
            }
            if err := writeImage(ctx, ref, img, o, w.progress); err != nil {
                return err
            }
        default:
            // Workaround for #819.
            if wl, ok := ii.(withLayer); ok {
                layer, err := wl.Layer(desc.Digest)
                if err != nil {
                    return err
                }
                if err := w.uploadOne(ctx, layer); err != nil {
                    return err
                }
            }
        }
    }

    // With all of the constituent elements uploaded, upload the manifest
    // to commit the image.
    return w.commitManifest(ctx, ii, ref)
}

type withMediaType interface {
    MediaType() (types.MediaType, error)
}
@ -655,10 +531,7 @@ func (w *writer) commitSubjectReferrers(ctx context.Context, sub name.Digest, ad
        return im.Manifests[i].Digest.String() < im.Manifests[j].Digest.String()
    })
    logs.Progress.Printf("updating fallback tag %s with new referrer", t.Identifier())
    if err := w.commitManifest(ctx, fallbackTaggable{im}, t); err != nil {
        return err
    }
    return nil
    return w.commitManifest(ctx, fallbackTaggable{im}, t)
}

type fallbackTaggable struct {
@ -770,183 +643,26 @@ func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string {
// WriteIndex will attempt to push all of the referenced manifests before
// attempting to push the ImageIndex, to retain referential integrity.
func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) {
    o, err := makeOptions(ref.Context(), options...)
    o, err := makeOptions(options...)
    if err != nil {
        return err
    }

    scopes := []string{ref.Scope(transport.PushScope)}
    tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
    if err != nil {
        return err
    if o.progress != nil {
        defer func() { o.progress.Close(rerr) }()
    }
    w := writer{
        repo:      ref.Context(),
        client:    &http.Client{Transport: tr},
        backoff:   o.retryBackoff,
        predicate: o.retryPredicate,
    }

    if o.updates != nil {
        w.progress = &progress{updates: o.updates}
        w.progress.lastUpdate = &v1.Update{}

        defer close(o.updates)
        defer func() { w.progress.err(rerr) }()

        w.progress.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts)
        if err != nil {
            return err
        }
    }

    return w.writeIndex(o.context, ref, ii, options...)
}

// countImage counts the total size of all layers + config blob + manifest for
// an image. It de-dupes duplicate layers.
func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) {
    var total int64
    ls, err := img.Layers()
    if err != nil {
        return 0, err
    }
    seen := map[v1.Hash]bool{}
    for _, l := range ls {
        // Handle foreign layers.
        mt, err := l.MediaType()
        if err != nil {
            return 0, err
        }
        if !mt.IsDistributable() && !allowNondistributableArtifacts {
            continue
        }

        // TODO: support streaming layers which update the total count as they write.
        if _, ok := l.(*stream.Layer); ok {
            return 0, errors.New("cannot use stream.Layer and WithProgress")
        }

        // Dedupe layers.
        d, err := l.Digest()
        if err != nil {
            return 0, err
        }
        if seen[d] {
            continue
        }
        seen[d] = true

        size, err := l.Size()
        if err != nil {
            return 0, err
        }
        total += size
    }
    b, err := img.RawConfigFile()
    if err != nil {
        return 0, err
    }
    total += int64(len(b))
    size, err := img.Size()
    if err != nil {
        return 0, err
    }
    total += size
    return total, nil
}

// countIndex counts the total size of all images + sub-indexes for an index.
// It does not attempt to de-dupe duplicate images, etc.
func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) {
    var total int64
    mf, err := idx.IndexManifest()
    if err != nil {
        return 0, err
    }

    for _, desc := range mf.Manifests {
        switch desc.MediaType {
        case types.OCIImageIndex, types.DockerManifestList:
            sidx, err := idx.ImageIndex(desc.Digest)
            if err != nil {
                return 0, err
            }
            size, err := countIndex(sidx, allowNondistributableArtifacts)
            if err != nil {
                return 0, err
            }
            total += size
        case types.OCIManifestSchema1, types.DockerManifestSchema2:
            simg, err := idx.Image(desc.Digest)
            if err != nil {
                return 0, err
            }
            size, err := countImage(simg, allowNondistributableArtifacts)
            if err != nil {
                return 0, err
            }
            total += size
        default:
            // Workaround for #819.
            if wl, ok := idx.(withLayer); ok {
                layer, err := wl.Layer(desc.Digest)
                if err != nil {
                    return 0, err
                }
                size, err := layer.Size()
                if err != nil {
                    return 0, err
                }
                total += size
            }
        }
    }

    size, err := idx.Size()
    if err != nil {
        return 0, err
    }
    total += size
    return total, nil
    return newPusher(o).Push(o.context, ref, ii)
}

// WriteLayer uploads the provided Layer to the specified repo.
func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) {
    o, err := makeOptions(repo, options...)
    o, err := makeOptions(options...)
    if err != nil {
        return err
    }
    scopes := scopesForUploadingImage(repo, []v1.Layer{layer})
    tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
    if err != nil {
        return err
    if o.progress != nil {
        defer func() { o.progress.Close(rerr) }()
    }
    w := writer{
        repo:      repo,
        client:    &http.Client{Transport: tr},
        backoff:   o.retryBackoff,
        predicate: o.retryPredicate,
    }

    if o.updates != nil {
        w.progress = &progress{updates: o.updates}
        w.progress.lastUpdate = &v1.Update{}

        defer close(o.updates)
        defer func() { w.progress.err(rerr) }()

        // TODO: support streaming layers which update the total count as they write.
        if _, ok := layer.(*stream.Layer); ok {
            return errors.New("cannot use stream.Layer and WithProgress")
        }
        size, err := layer.Size()
        if err != nil {
            return err
        }
        w.progress.total(size)
    }
    return w.uploadOne(o.context, layer)
    return newPusher(o).Upload(o.context, repo, layer)
}

// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/<tag>
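WriteLayer likewise collapses to a call into Pusher.Upload. A small sketch of the unchanged caller-facing API, using a static layer and a placeholder repository:

package main

import (
    "log"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/remote"
    "github.com/google/go-containerregistry/pkg/v1/static"
    "github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
    repo, err := name.NewRepository("registry.example.com/test/blobs") // placeholder
    if err != nil {
        log.Fatal(err)
    }
    layer := static.NewLayer([]byte("hello"), types.OCILayer)
    // WriteLayer is now a thin wrapper over newPusher(o).Upload.
    if err := remote.WriteLayer(repo, layer); err != nil {
        log.Fatal(err)
    }
}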
@ -976,28 +692,9 @@ func Tag(tag name.Tag, t Taggable, options ...Option) error {
// should ensure that all blobs or manifests that are referenced by t exist
// in the target registry.
func Put(ref name.Reference, t Taggable, options ...Option) error {
    o, err := makeOptions(ref.Context(), options...)
    o, err := makeOptions(options...)
    if err != nil {
        return err
    }
    scopes := []string{ref.Scope(transport.PushScope)}

    // TODO: This *always* does a token exchange. For some registries,
    // that's pretty slow. Some ideas;
    // * Tag could take a list of tags.
    // * Allow callers to pass in a transport.Transport, typecheck
    //   it to allow them to reuse the transport across multiple calls.
    // * WithTag option to do multiple manifest PUTs in commitManifest.
    tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
    if err != nil {
        return err
    }
    w := writer{
        repo:      ref.Context(),
        client:    &http.Client{Transport: tr},
        backoff:   o.retryBackoff,
        predicate: o.retryPredicate,
    }

    return w.commitManifest(o.context, t, ref)
    return newPusher(o).Push(o.context, ref, t)
}
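Since Put and Tag now share the Pusher path too, re-tagging a manifest that already exists remotely remains cheap. A sketch with placeholder references; the assumption here is that *remote.Descriptor satisfies Taggable, so no blobs move:

package main

import (
    "log"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
    src, err := name.ParseReference("registry.example.com/test/img:v1") // placeholder
    if err != nil {
        log.Fatal(err)
    }
    dst, err := name.NewTag("registry.example.com/test/img:v2") // placeholder
    if err != nil {
        log.Fatal(err)
    }
    desc, err := remote.Get(src)
    if err != nil {
        log.Fatal(err)
    }
    // Only a manifest PUT happens here; the layers already exist remotely.
    if err := remote.Tag(dst, desc); err != nil {
        log.Fatal(err)
    }
}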
@ -130,6 +130,8 @@ func (l *Layer) Uncompressed() (io.ReadCloser, error) {

// Compressed implements v1.Layer.
func (l *Layer) Compressed() (io.ReadCloser, error) {
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.consumed {
        return nil, ErrConsumed
    }
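The added mutex makes the consumed check race-free. A sketch of the single-use contract it protects — stream layers may only be consumed once; the exact point at which the layer counts as consumed is an assumption of this example:

package main

import (
    "io"
    "log"
    "strings"

    "github.com/google/go-containerregistry/pkg/v1/stream"
)

func main() {
    rc := io.NopCloser(strings.NewReader("streamed layer contents"))
    l := stream.NewLayer(rc)

    first, err := l.Compressed()
    if err != nil {
        log.Fatal(err)
    }
    io.Copy(io.Discard, first)
    first.Close()

    // A second call should fail with stream.ErrConsumed.
    if _, err := l.Compressed(); err == stream.ErrConsumed {
        log.Println("layer already consumed, as expected")
    }
}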
@ -223,9 +223,9 @@ func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) {
    if err != nil {
        return nil, err
    }
    close := true
    needClose := true
    defer func() {
        if close {
        if needClose {
            f.Close()
        }
    }()

@ -244,7 +244,7 @@ func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) {
        currentDir := filepath.Dir(filePath)
        return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname)))
    }
    close = false
    needClose = false
    return tarFile{
        Reader: tf,
        Closer: f,
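The rename avoids shadowing Go's builtin close and makes the ownership hand-off explicit. The same guard pattern in isolation, with hypothetical names:

package main

import (
    "fmt"
    "os"
)

// openChecked returns an open file only on success; on any early return the
// deferred guard closes it. needClose flips to false exactly when ownership
// transfers to the caller.
func openChecked(path string) (*os.File, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    needClose := true
    defer func() {
        if needClose {
            f.Close()
        }
    }()

    if fi, err := f.Stat(); err != nil || fi.IsDir() {
        return nil, fmt.Errorf("not a readable file: %s", path)
    }

    needClose = false // success: the caller now owns f
    return f, nil
}

func main() {
    if f, err := openChecked("/etc/hosts"); err == nil {
        fmt.Println("opened", f.Name())
        f.Close()
    }
}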
@ -160,6 +160,8 @@ func WithCompressedCaching(l *layer) {
// WithEstargzOptions is a functional option that allow the caller to pass
// through estargz.Options to the underlying compression layer. This is
// only meaningful when estargz is enabled.
//
// Deprecated: WithEstargz is deprecated, and will be removed in a future release.
func WithEstargzOptions(opts ...estargz.Option) LayerOption {
    return func(l *layer) {
        l.estgzopts = opts

@ -167,6 +169,8 @@ func WithEstargzOptions(opts ...estargz.Option) LayerOption {
}

// WithEstargz is a functional option that explicitly enables estargz support.
//
// Deprecated: WithEstargz is deprecated, and will be removed in a future release.
func WithEstargz(l *layer) {
    oguncompressed := l.uncompressedopener
    estargz := func() (io.ReadCloser, error) {

@ -238,6 +242,7 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
    }

    if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" {
        logs.Warn.Println("GGCR_EXPERIMENT_ESTARGZ is deprecated, and will be removed in a future release.")
        opts = append([]LayerOption{WithEstargz}, opts...)
    }
@ -80,3 +80,19 @@ func (m MediaType) IsConfig() bool {
    }
    return false
}

func (m MediaType) IsSchema1() bool {
    switch m {
    case DockerManifestSchema1, DockerManifestSchema1Signed:
        return true
    }
    return false
}

func (m MediaType) IsLayer() bool {
    switch m {
    case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer:
        return true
    }
    return false
}
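A quick demonstration of the two predicates added here, IsSchema1 and IsLayer:

package main

import (
    "fmt"

    "github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
    for _, mt := range []types.MediaType{
        types.DockerManifestSchema1,
        types.OCILayer,
        types.OCIManifestSchema1,
    } {
        fmt.Printf("%s schema1=%v layer=%v\n", mt, mt.IsSchema1(), mt.IsLayer())
    }
}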
@ -16,6 +16,30 @@ This package provides various compression algorithms.

# changelog

* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
  * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
  * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
  * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
  * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
  * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
  * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
  * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
  * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
  * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
  * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
  * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
  * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774

* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
  * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
  * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
  * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
  * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
  * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
  * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746

* Jan 21st, 2023 (v1.15.15)
  * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
  * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

@ -600,6 +624,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.

# license
@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
    br := &s.bits
    br.init(s.br.unread())
    if err := br.init(s.br.unread()); err != nil {
        return err
    }

    var s1, s2 decoder
    // Initialize and decode first state and symbol.
@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
    b.nBits += encA.nBits + encB.nBits
}

// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
    bitsA := encA.nBits
    bitsB := bitsA + encB.nBits
    bitsC := bitsB + encC.nBits
    bitsD := bitsC + encD.nBits
    combined := uint64(encA.val) |
        (uint64(encB.val) << (bitsA & 63)) |
        (uint64(encC.val) << (bitsB & 63)) |
        (uint64(encD.val) << (bitsC & 63))
    b.bitContainer |= combined << (b.nBits & 63)
    b.nBits += bitsD
}

// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
    if b.nBits < 32 {
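A standalone sketch of the shift-and-OR packing that encFourSymbols performs, with made-up code values and widths — four variable-length codes become one uint64 that is ORed into the bit container:

package main

import "fmt"

func main() {
    type code struct {
        val   uint16
        nBits uint8
    }
    // Toy codes: 2, 3, 1, and 4 bits wide.
    a, b, c, d := code{0x3, 2}, code{0x5, 3}, code{0x1, 1}, code{0x9, 4}

    bitsA := a.nBits
    bitsB := bitsA + b.nBits
    bitsC := bitsB + c.nBits
    bitsD := bitsC + d.nBits
    combined := uint64(a.val) |
        uint64(b.val)<<(bitsA&63) |
        uint64(c.val)<<(bitsB&63) |
        uint64(d.val)<<(bitsC&63)

    var container uint64
    var nBits uint8
    container |= combined << (nBits & 63)
    nBits += bitsD

    fmt.Printf("packed %d bits: %010b\n", nBits, container) // packed 10 bits: 1001110111
}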
@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
            tmp := src[n : n+4]
            // tmp should be len 4
            bw.flush32()
            bw.encTwoSymbols(cTable, tmp[3], tmp[2])
            bw.encTwoSymbols(cTable, tmp[1], tmp[0])
            bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
        }
    } else {
        for ; n >= 0; n -= 4 {
@ -9,6 +9,7 @@ import (
    "encoding/binary"
    "errors"
    "fmt"
    "hash/crc32"
    "io"
    "os"
    "path/filepath"

@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
        }
    }
    var err error
    if debugDecoder {
        println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
    }
    huff, literals, err = huff0.ReadTable(literals, huff)
    if err != nil {
        println("reading huffman table:", err)
@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
        return b.encodeLits(b.literals, rawAllLits)
    }
    // We want some difference to at least account for the headers.
    saved := b.size - len(b.literals) - (b.size >> 5)
    saved := b.size - len(b.literals) - (b.size >> 6)
    if saved < 16 {
        if org == nil {
            return errIncompressible

@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
    }
    b.output = wr.out

    // Maybe even add a bigger margin.
    if len(b.output)-3-bhOffset >= b.size {
        // Maybe even add a bigger margin.
        // Discard and encode as raw block.
        b.output = b.encodeRawTo(b.output[:bhOffset], org)
        b.popOffsets()
        b.litEnc.Reuse = huff0.ReusePolicyNone
        return errIncompressible
        return nil
    }

    // Size is output minus block header.
@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
func (b *byteBuf) readByte() (byte, error) {
    bb := *b
    if len(bb) < 1 {
        return 0, nil
        return 0, io.ErrUnexpectedEOF
    }
    r := bb[0]
    *b = bb[1:]

@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}

func (r *readerWrapper) readByte() (byte, error) {
    n2, err := r.r.Read(r.tmp[:1])
    n2, err := io.ReadFull(r.r, r.tmp[:1])
    if err != nil {
        if err == io.EOF {
            err = io.ErrUnexpectedEOF
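Why io.ReadFull here: a bare Read may legally return 0 bytes with a nil error, while io.ReadFull either fills the buffer or reports an error, which the caller then normalizes to io.ErrUnexpectedEOF. A tiny sketch:

package main

import (
    "fmt"
    "io"
    "strings"
)

func main() {
    var tmp [1]byte

    // An exhausted reader: ReadFull reports io.EOF (0 bytes read), which the
    // readByte caller above maps to io.ErrUnexpectedEOF.
    if _, err := io.ReadFull(strings.NewReader(""), tmp[:]); err != nil {
        fmt.Println("empty input:", err)
    }

    // A non-empty reader fills the buffer deterministically.
    if _, err := io.ReadFull(strings.NewReader("x"), tmp[:]); err == nil {
        fmt.Printf("read byte: %q\n", tmp[0])
    }
}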
@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
    }

    if len(next.b) > 0 {
        n, err := d.current.crc.Write(next.b)
        if err == nil {
            if n != len(next.b) {
                d.current.err = io.ErrShortWrite
            }
        }
        d.current.crc.Write(next.b)
    }
    if next.err == nil && next.d != nil && next.d.hasCRC {
        got := uint32(d.current.crc.Sum64())
@ -32,10 +32,9 @@ type match struct {
    length int32
    rep    int32
    est    int32
    _      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}

const highScore = 25000
const highScore = maxMatchLen * 8

// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {

@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {

    // nextEmit is where in src the next emitLiteral should start from.
    nextEmit := s
    cv := load6432(src, s)

    // Relative offsets
    offset1 := int32(blk.recentOffsets[0])

@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
        blk.literals = append(blk.literals, src[nextEmit:until]...)
        s.litLen = uint32(until - nextEmit)
    }
    _ = addLiterals

    if debugEncoder {
        println("recent offsets:", blk.recentOffsets)
@ -189,53 +186,96 @@ encodeLoop:
            panic("offset0 was 0")
        }

        bestOf := func(a, b *match) *match {
            if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
                return a
            }
            return b
        }
        const goodEnough = 100
        const goodEnough = 250

        cv := load6432(src, s)

        nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
        nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
        candidateL := e.longTable[nextHashL]
        candidateS := e.table[nextHashS]

        matchAt := func(offset int32, s int32, first uint32, rep int32) match {
        // Set m to a match at offset if it looks like that will improve compression.
        improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
            if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
                return match{s: s, est: highScore}
                return
            }
            if debugAsserts {
                if offset <= 0 {
                    panic(offset)
                }
                if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
                    panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
                }
            }
            m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
            m.estBits(bitsPerByte)
            return m
            // Try to quick reject if we already have a long match.
            if m.length > 16 {
                left := len(src) - int(m.s+m.length)
                // If we are too close to the end, keep as is.
                if left <= 0 {
                    return
                }
                checkLen := m.length - (s - m.s) - 8
                if left > 2 && checkLen > 4 {
                    // Check 4 bytes, 4 bytes from the end of the current match.
                    a := load3232(src, offset+checkLen)
                    b := load3232(src, s+checkLen)
                    if a != b {
                        return
                    }
                }
            }
            l := 4 + e.matchlen(s+4, offset+4, src)
            if rep < 0 {
                // Extend candidate match backwards as far as possible.
                tMin := s - e.maxMatchOff
                if tMin < 0 {
                    tMin = 0
                }
                for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
                    s--
                    offset--
                    l++
                }
            }

            cand := match{offset: offset, s: s, length: l, rep: rep}
            cand.estBits(bitsPerByte)
            if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
                *m = cand
            }
        }

        m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
        m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
        m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
        m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
        best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
        best := match{s: s, est: highScore}
        improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
        improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
        improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
        improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)

        if canRepeat && best.length < goodEnough {
            cv32 := uint32(cv >> 8)
            spp := s + 1
            m1 := matchAt(spp-offset1, spp, cv32, 1)
            m2 := matchAt(spp-offset2, spp, cv32, 2)
            m3 := matchAt(spp-offset3, spp, cv32, 3)
            best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
            if best.length > 0 {
                cv32 = uint32(cv >> 24)
                spp += 2
                m1 := matchAt(spp-offset1, spp, cv32, 1)
                m2 := matchAt(spp-offset2, spp, cv32, 2)
                m3 := matchAt(spp-offset3, spp, cv32, 3)
                best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
            if s == nextEmit {
                // Check repeats straight after a match.
                improve(&best, s-offset2, s, uint32(cv), 1|4)
                improve(&best, s-offset3, s, uint32(cv), 2|4)
                if offset1 > 1 {
                    improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
                }
            }

            // If either no match or a non-repeat match, check at + 1
            if best.rep <= 0 {
                cv32 := uint32(cv >> 8)
                spp := s + 1
                improve(&best, spp-offset1, spp, cv32, 1)
                improve(&best, spp-offset2, spp, cv32, 2)
                improve(&best, spp-offset3, spp, cv32, 3)
                if best.rep < 0 {
                    cv32 = uint32(cv >> 24)
                    spp += 2
                    improve(&best, spp-offset1, spp, cv32, 1)
                    improve(&best, spp-offset2, spp, cv32, 2)
                    improve(&best, spp-offset3, spp, cv32, 3)
                }
            }
        }
        // Load next and check...
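The refactor above replaces the allocate-then-reduce bestOf helper with an improve function that updates a single best match in place, avoiding per-candidate work in the hot loop. A simplified sketch of that pattern, with a toy scoring rule (longer is better) standing in for the real bit-cost estimate:

package main

import "fmt"

type match struct {
    s, length, est int32
}

// improve updates best in place if the candidate scores better.
func improve(best *match, s, length int32) {
    cand := match{s: s, length: length, est: -length}
    if cand.est < best.est {
        *best = cand
    }
}

func main() {
    best := match{est: 1 << 30} // sentinel, like highScore in the real code
    for _, l := range []int32{4, 9, 6} {
        improve(&best, 0, l)
    }
    fmt.Println("best length:", best.length) // 9
}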
@ -250,47 +290,45 @@ encodeLoop:
                if s >= sLimit {
                    break encodeLoop
                }
                cv = load6432(src, s)
                continue
            }

            s++
            candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
            cv = load6432(src, s)
            cv2 := load6432(src, s+1)
            cv = load6432(src, s+1)
            cv2 := load6432(src, s+2)
            candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
            candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]

            // Short at s+1
            m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
            improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
            // Long at s+1, s+2
            m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
            m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
            m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
            m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
            best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
            improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
            improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
            improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
            improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
            if false {
                // Short at s+3.
                // Too often worse...
                m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
                best = bestOf(best, &m)
                improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
            }
            // See if we can find a better match by checking where the current best ends.
            // Use that offset to see if we can find a better full match.
            if sAt := best.s + best.length; sAt < sLimit {
                nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
                candidateEnd := e.longTable[nextHashL]
                // Start check at a fixed offset to allow for a few mismatches.
                // For this compression level 2 yields the best results.
                const skipBeginning = 2
                if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
                    m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                    bestEnd := bestOf(best, &m)
                    if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
                        m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                        bestEnd = bestOf(bestEnd, &m)

            // Start check at a fixed offset to allow for a few mismatches.
            // For this compression level 2 yields the best results.
            // We cannot do this if we have already indexed this position.
            const skipBeginning = 2
            if best.s > s-skipBeginning {
                // See if we can find a better match by checking where the current best ends.
                // Use that offset to see if we can find a better full match.
                if sAt := best.s + best.length; sAt < sLimit {
                    nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
                    candidateEnd := e.longTable[nextHashL]

                    if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
                        improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                        if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
                            improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
                        }
                    }
                    best = bestEnd
                }
            }
        }
@ -303,51 +341,34 @@ encodeLoop:

        // We have a match, we can store the forward value
        if best.rep > 0 {
            s = best.s
            var seq seq
            seq.matchLen = uint32(best.length - zstdMinMatch)

            // We might be able to match backwards.
            // Extend as long as we can.
            start := best.s
            // We end the search early, so we don't risk 0 literals
            // and have to do special offset treatment.
            startLimit := nextEmit + 1

            tMin := s - e.maxMatchOff
            if tMin < 0 {
                tMin = 0
            if debugAsserts && s <= nextEmit {
                panic("s <= nextEmit")
            }
            repIndex := best.offset
            for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
                repIndex--
                start--
                seq.matchLen++
            }
            addLiterals(&seq, start)
            addLiterals(&seq, best.s)

            // rep 0
            seq.offset = uint32(best.rep)
            // Repeat. If bit 4 is set, this is a non-lit repeat.
            seq.offset = uint32(best.rep & 3)
            if debugSequences {
                println("repeat sequence", seq, "next s:", s)
            }
            blk.sequences = append(blk.sequences, seq)

            // Index match start+1 (long) -> s - 1
            index0 := s
            // Index old s + 1 -> s - 1
            index0 := s + 1
            s = best.s + best.length

            nextEmit = s
            if s >= sLimit {
                if debugEncoder {
                    println("repeat ended", s, best.length)

                }
                break encodeLoop
            }
            // Index skipped...
            off := index0 + e.cur
            for index0 < s-1 {
            for index0 < s {
                cv0 := load6432(src, index0)
                h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
                h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -357,17 +378,19 @@ encodeLoop:
                index0++
            }
            switch best.rep {
            case 2:
            case 2, 4 | 1:
                offset1, offset2 = offset2, offset1
            case 3:
            case 3, 4 | 2:
                offset1, offset2, offset3 = offset3, offset1, offset2
            case 4 | 3:
                offset1, offset2, offset3 = offset1-1, offset1, offset2
            }
            cv = load6432(src, s)
            continue
        }

        // A 4-byte match has been found. Update recent offsets.
        // We'll later see if more than 4 bytes.
        index0 := s + 1
        s = best.s
        t := best.offset
        offset1, offset2, offset3 = s-t, offset1, offset2

@ -380,22 +403,9 @@ encodeLoop:
            panic("invalid offset")
        }

        // Extend the n-byte match as long as possible.
        l := best.length

        // Extend backwards
        tMin := s - e.maxMatchOff
        if tMin < 0 {
            tMin = 0
        }
        for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
            s--
            t--
            l++
        }

        // Write our sequence
        var seq seq
        l := best.length
        seq.litLen = uint32(s - nextEmit)
        seq.matchLen = uint32(l - zstdMinMatch)
        if seq.litLen > 0 {
@ -412,10 +422,8 @@ encodeLoop:
            break encodeLoop
        }

        // Index match start+1 (long) -> s - 1
        index0 := s - l + 1
        // every entry
        for index0 < s-1 {
        // Index old s + 1 -> s - 1
        for index0 < s {
            cv0 := load6432(src, index0)
            h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
            h1 := hashLen(cv0, bestShortTableBits, bestShortLen)

@ -424,50 +432,6 @@ encodeLoop:
            e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
            index0++
        }

        cv = load6432(src, s)
        if !canRepeat {
            continue
        }

        // Check offset 2
        for {
            o2 := s - offset2
            if load3232(src, o2) != uint32(cv) {
                // Do regular search
                break
            }

            // Store this, since we have it.
            nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
            nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)

            // We have at least 4 byte match.
            // No need to check backwards. We come straight from a match
            l := 4 + e.matchlen(s+4, o2+4, src)

            e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
            e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
            seq.matchLen = uint32(l) - zstdMinMatch
            seq.litLen = 0

            // Since litlen is always 0, this is offset 1.
            seq.offset = 1
            s += l
            nextEmit = s
            if debugSequences {
                println("sequence", seq, "next s:", s)
            }
            blk.sequences = append(blk.sequences, seq)

            // Swap offset 1 and 2.
            offset1, offset2 = offset2, offset1
            if s >= sLimit {
                // Finished
                break encodeLoop
            }
            cv = load6432(src, s)
        }
    }

    if int(nextEmit) < len(src) {
@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error {
            s.eofWritten = true
        }

        err := errIncompressible
        // If we got the exact same number of literals as input,
        // assume the literals cannot be compressed.
        if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
            err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
        }
        switch err {
        case errIncompressible:
            if debugEncoder {
                println("Storing incompressible block as raw")
            }
            blk.encodeRaw(src)
            // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
        case nil:
        default:
            s.err = err
            return err
        s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
        if s.err != nil {
            return s.err
        }
        _, s.err = s.w.Write(blk.output)
        s.nWritten += int64(len(blk.output))

@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error {
        }
        s.wWg.Done()
    }()
    err := errIncompressible
    // If we got the exact same number of literals as input,
    // assume the literals cannot be compressed.
    if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
        err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
    }
    switch err {
    case errIncompressible:
        if debugEncoder {
            println("Storing incompressible block as raw")
        }
        blk.encodeRaw(src)
        // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
    case nil:
    default:
        s.writeErr = err
    s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
    if s.writeErr != nil {
        return
    }
    _, s.writeErr = s.w.Write(blk.output)

@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {

        // If we got the exact same number of literals as input,
        // assume the literals cannot be compressed.
        err := errIncompressible
        oldout := blk.output
        if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
            // Output directly to dst
            blk.output = dst
            err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
        }
        // Output directly to dst
        blk.output = dst

        switch err {
        case errIncompressible:
            if debugEncoder {
                println("Storing incompressible block as raw")
            }
            dst = blk.encodeRawTo(dst, src)
        case nil:
            dst = blk.output
        default:
        err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
        if err != nil {
            panic(err)
        }
        dst = blk.output
        blk.output = oldout
    } else {
        enc.Reset(e.o.dict, false)

@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
            if len(src) == 0 {
                blk.last = true
            }
            err := errIncompressible
            // If we got the exact same number of literals as input,
            // assume the literals cannot be compressed.
            if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
                err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
            }

            switch err {
            case errIncompressible:
                if debugEncoder {
                    println("Storing incompressible block as raw")
                }
                dst = blk.encodeRawTo(dst, todo)
                blk.popOffsets()
            case nil:
                dst = append(dst, blk.output...)
            default:
            err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
            if err != nil {
                panic(err)
            }
            dst = append(dst, blk.output...)
            blk.reset(nil)
        }
    }
@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
        blockSize:     maxCompressedBlockSize,
        windowSize:    8 << 20,
        level:         SpeedDefault,
        allLitEntropy: true,
        allLitEntropy: false,
        lowMem:        false,
    }
}

@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
            }
        }
        if !o.customALEntropy {
            o.allLitEntropy = l > SpeedFastest
            o.allLitEntropy = l > SpeedDefault
        }

        return nil
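With allLitEntropy now off by default and only auto-enabled above SpeedDefault, callers pick the tradeoff through WithEncoderLevel. A usage sketch:

package main

import (
    "bytes"
    "fmt"

    "github.com/klauspost/compress/zstd"
)

func main() {
    var buf bytes.Buffer
    // Levels above SpeedDefault re-enable all-literal entropy coding automatically.
    enc, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(zstd.SpeedBetterCompression))
    if err != nil {
        panic(err)
    }
    enc.Write(bytes.Repeat([]byte("abc123"), 100))
    enc.Close()
    fmt.Println("compressed size:", buf.Len())
}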
@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error {
    return nil
}

// checkCRC will check the checksum if the frame has one.
// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
    if !d.HasCheckSum {
        return nil
    }

    // We can overwrite upper tmp now
    buf, err := d.rawInput.readSmall(4)
    if err != nil {

@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error {
        return err
    }

    if d.o.ignoreChecksum {
        return nil
    }

    want := binary.LittleEndian.Uint32(buf[:4])
    got := uint32(d.crc.Sum64())

@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error {
    return nil
}

// consumeCRC reads the checksum data if the frame has one.
// consumeCRC skips over the checksum, assuming the frame has one.
func (d *frameDec) consumeCRC() error {
    if d.HasCheckSum {
        _, err := d.rawInput.readSmall(4)
        if err != nil {
            println("CRC missing?", err)
            return err
        }
    _, err := d.rawInput.readSmall(4)
    if err != nil {
        println("CRC missing?", err)
    }

    return nil
    return err
}

// runDecoder will run the decoder for the remainder of the frame.

@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
            if d.o.ignoreChecksum {
                err = d.consumeCRC()
            } else {
                var n int
                n, err = d.crc.Write(dst[crcStart:])
                if err == nil {
                    if n != len(dst)-crcStart {
                        err = io.ErrShortWrite
                    } else {
                        err = d.checkCRC()
                    }
                }
                d.crc.Write(dst[crcStart:])
                err = d.checkCRC()
            }
        }
    }
@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
        maxBlockSize = s.windowSize
    }

    if debugDecoder {
        println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
    }
    for i := seqs - 1; i >= 0; i-- {
        if br.overread() {
            printf("reading sequence %d, exceeded available data\n", seqs-i)
            printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
            return io.ErrUnexpectedEOF
        }
        var ll, mo, ml int

@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
        }
        size := ll + ml + len(out)
        if size-startSize > maxBlockSize {
            if size-startSize == 424242 {
                panic("here")
            }
            return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
        }
        if size > cap(out) {

@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
        }
    }

    // Check if space for literals
    if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
    if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
        return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
    }
@ -5,6 +5,7 @@ package zstd

import (
    "fmt"
    "io"

    "github.com/klauspost/compress/internal/cpuinfo"
)

@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
        return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
            ctx.ll, ctx.litRemain+ctx.ll)

    case errorOverread:
        return true, io.ErrUnexpectedEOF

    case errorNotEnoughSpace:
        size := ctx.outPosition + ctx.ll + ctx.ml
        if debugDecoder {

@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
    s.seqSize += ctx.litRemain
    if s.seqSize > maxBlockSize {
        return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

    }
    err := br.close()
    if err != nil {

@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5

// error reported when bits are overread.
const errorOverread = 6

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
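The assembly routines report failures as small integer codes that the Go wrapper translates, as the new errorOverread case does. A simplified, standalone version of that translation:

package main

import (
    "fmt"
    "io"
)

const (
    errorNotEnoughSpace = 5
    errorOverread       = 6
)

// asError mirrors the switch in decodeSyncSimple: code 0 is success,
// overread maps to io.ErrUnexpectedEOF, anything else is reported verbatim.
func asError(code int) error {
    switch code {
    case 0:
        return nil
    case errorOverread:
        return io.ErrUnexpectedEOF
    default:
        return fmt.Errorf("sequence decoder returned code %d", code)
    }
}

func main() {
    fmt.Println(asError(errorOverread))
}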
@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
        litRemain: len(s.literals),
    }

    if debugDecoder {
        println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
    }

    s.seqSize = 0
    lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
    var errCode int

@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
        case errorNotEnoughLiterals:
            ll := ctx.seqs[i].ll
            return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
        case errorOverread:
            return io.ErrUnexpectedEOF
        }

        return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)

@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
    if s.seqSize > maxBlockSize {
        return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
    }
    if debugDecoder {
        println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
    }
    err := br.close()
    if err != nil {
        printf("Closing sequences: %v, %+v\n", err, *br)
@@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:
 
 sequenceDecs_decode_amd64_fill_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decode_amd64_fill_end
+	JLE  sequenceDecs_decode_amd64_fill_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decode_amd64_fill_end
 	SHLQ $0x08, DX
@@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decode_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_end:
 	// Update offset
 	MOVQ R9, AX
@@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero:
 
 sequenceDecs_decode_amd64_fill_2_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decode_amd64_fill_2_end
+	JLE  sequenceDecs_decode_amd64_fill_2_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decode_amd64_fill_2_end
 	SHLQ $0x08, DX
@@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decode_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_2_end:
 	// Update literal length
 	MOVQ DI, AX
@@ -320,6 +328,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop:
 
 sequenceDecs_decode_56_amd64_fill_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decode_56_amd64_fill_end
+	JLE  sequenceDecs_decode_56_amd64_fill_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decode_56_amd64_fill_end
 	SHLQ $0x08, DX
@@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decode_56_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_56_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_amd64_fill_end:
 	// Update offset
 	MOVQ R9, AX
@@ -613,6 +630,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop:
 
 sequenceDecs_decode_bmi2_fill_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decode_bmi2_fill_end
+	JLE  sequenceDecs_decode_bmi2_fill_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decode_bmi2_fill_end
 	SHLQ $0x08, AX
@@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decode_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_end:
 	// Update offset
 	MOVQ $0x00000808, CX
@@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end:
 
 sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decode_bmi2_fill_2_end
+	JLE  sequenceDecs_decode_bmi2_fill_2_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decode_bmi2_fill_2_end
 	SHLQ $0x08, AX
@@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decode_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ $0x00000808, CX
@@ -889,6 +919,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop:
 
 sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decode_56_bmi2_fill_end
+	JLE  sequenceDecs_decode_56_bmi2_fill_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decode_56_bmi2_fill_end
 	SHLQ $0x08, AX
@@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decode_56_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_bmi2_fill_end:
 	// Update offset
 	MOVQ $0x00000808, CX
@@ -1140,6 +1179,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 // Requires: SSE
 TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop:
 
 sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decodeSync_amd64_fill_end
+	JLE  sequenceDecs_decodeSync_amd64_fill_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decodeSync_amd64_fill_end
 	SHLQ $0x08, DX
@@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decodeSync_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_end:
 	// Update offset
 	MOVQ R9, AX
@@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero:
 
 sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decodeSync_amd64_fill_2_end
+	JLE  sequenceDecs_decodeSync_amd64_fill_2_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decodeSync_amd64_fill_2_end
 	SHLQ $0x08, DX
@@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_2_end:
 	// Update literal length
 	MOVQ DI, AX
@@ -2291,6 +2343,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop:
 
 sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decodeSync_bmi2_fill_end
+	JLE  sequenceDecs_decodeSync_bmi2_fill_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decodeSync_bmi2_fill_end
 	SHLQ $0x08, AX
@@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_end:
 	// Update offset
 	MOVQ $0x00000808, CX
@@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end:
 
 sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decodeSync_bmi2_fill_2_end
+	JLE  sequenceDecs_decodeSync_bmi2_fill_2_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decodeSync_bmi2_fill_2_end
 	SHLQ $0x08, AX
@@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ $0x00000808, CX
@@ -2801,6 +2866,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop:
 
 sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decodeSync_safe_amd64_fill_end
+	JLE  sequenceDecs_decodeSync_safe_amd64_fill_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decodeSync_safe_amd64_fill_end
 	SHLQ $0x08, DX
@@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_end:
 	// Update offset
 	MOVQ R9, AX
@@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
 
 sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
 	CMPQ SI, $0x00
-	JLE  sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	JLE  sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
 	CMPQ BX, $0x07
 	JLE  sequenceDecs_decodeSync_safe_amd64_fill_2_end
 	SHLQ $0x08, DX
@@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
 	ORQ  AX, DX
 	JMP  sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_2_end:
 	// Update literal length
 	MOVQ DI, AX
@@ -3455,6 +3533,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop:
 
 sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_end
+	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_end
 	SHLQ $0x08, AX
@@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_end:
 	// Update offset
 	MOVQ $0x00000808, CX
@@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end:
 
 sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
 	CMPQ BX, $0x00
-	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
 	CMPQ DX, $0x07
 	JLE  sequenceDecs_decodeSync_safe_bmi2_fill_2_end
 	SHLQ $0x08, AX
@@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
 	ORQ  CX, AX
 	JMP  sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ $0x00000808, CX
@@ -4067,6 +4158,11 @@ error_not_enough_literals:
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
error_not_enough_space:
 	MOVQ ctx+16(FP), AX
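Every fill loop above gains the same three-instruction guard: before refilling, the bits-consumed counter is compared against 0x40 (64), and a value above 64 means more bits were taken from the 64-bit buffer than the input could have supplied, so the stream was overread. In Go terms the guard is roughly the following sketch (field names are illustrative, not the vendored bitReader's exact ones):

```go
package zstdsketch

import "io"

// bitBuf models the state the assembly keeps in registers: a 64-bit
// bit buffer plus a count of bits consumed from it.
type bitBuf struct {
	value    uint64
	bitsRead uint8
}

// checkOverread is the Go shape of "CMPQ reg, $0x40; JA error_overread":
// consuming more than 64 bits from a 64-bit buffer means the decoder ran
// past the end of its input.
func (b *bitBuf) checkOverread() error {
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
	return nil
}
```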
@@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) {
 }
 
 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
+	return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
 }
 
 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
+	return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
 }
 
 type byter interface {
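The load3232/load6432 change swaps a plain subslice for a full (three-index) slice expression. The third index pins the new slice's capacity at len(b), so a slice derived from it can never reach into spare capacity beyond the length, and the compiler sees a slice whose length and capacity are provably equal. A standalone demonstration of the semantics being relied on:

```go
package main

import "fmt"

func main() {
	backing := make([]byte, 8, 16) // len 8, cap 16
	s := backing[:len(backing):len(backing)]
	fmt.Println(len(s), cap(s)) // 8 8: capacity clamped to the length
	tail := s[6:]
	fmt.Println(len(tail), cap(tail)) // 2 2: no access to backing[8:16]
}
```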
@@ -7,10 +7,8 @@ package term
 
 import (
 	"errors"
 	"fmt"
 	"io"
-	"os"
-	"os/signal"
 
 	"golang.org/x/sys/unix"
 )
@@ -80,7 +78,6 @@ func DisableEcho(fd uintptr, state *State) error {
 	if err := tcset(fd, &newState); err != nil {
 		return err
 	}
-	handleInterrupt(fd, state)
 	return nil
 }
 
@@ -92,7 +89,6 @@ func SetRawTerminal(fd uintptr) (*State, error) {
 	if err != nil {
 		return nil, err
 	}
-	handleInterrupt(fd, oldState)
 	return oldState, err
 }
 
@@ -102,18 +98,3 @@ func SetRawTerminal(fd uintptr) (*State, error) {
 func SetRawTerminalOutput(fd uintptr) (*State, error) {
 	return nil, nil
 }
-
-func handleInterrupt(fd uintptr, state *State) {
-	sigchan := make(chan os.Signal, 1)
-	signal.Notify(sigchan, os.Interrupt)
-	go func() {
-		for range sigchan {
-			// quit cleanly and the new terminal item is on a new line
-			fmt.Println()
-			signal.Stop(sigchan)
-			close(sigchan)
-			RestoreTerminal(fd, state)
-			os.Exit(1)
-		}
-	}()
-}
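With handleInterrupt deleted, the package no longer installs a SIGINT handler or calls os.Exit on the application's behalf; restoring the terminal is now entirely the caller's responsibility. A hedged sketch of what a caller might do, assuming only the SetRawTerminal/RestoreTerminal signatures visible in this diff (the import path is an assumption):

```go
package main

import (
	"os"

	"github.com/moby/term"
)

func main() {
	fd := os.Stdin.Fd()
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	// Restore on return; any interrupt handling is now wired up by the
	// application itself rather than by the library.
	defer term.RestoreTerminal(fd, state)
	// ... interactive work ...
}
```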
@@ -65,7 +65,4 @@ const (
 
-	// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
-	AnnotationArtifactDescription = "org.opencontainers.artifact.description"
-
 	// AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
 	AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied"
 )

@@ -1,34 +0,0 @@
-// Copyright 2022 The Linux Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v1
-
-// Artifact describes an artifact manifest.
-// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON.
-type Artifact struct {
-	// MediaType is the media type of the object this schema refers to.
-	MediaType string `json:"mediaType"`
-
-	// ArtifactType is the IANA media type of the artifact this schema refers to.
-	ArtifactType string `json:"artifactType"`
-
-	// Blobs is a collection of blobs referenced by this manifest.
-	Blobs []Descriptor `json:"blobs,omitempty"`
-
-	// Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest.
-	Subject *Descriptor `json:"subject,omitempty"`
-
-	// Annotations contains arbitrary metadata for the artifact manifest.
-	Annotations map[string]string `json:"annotations,omitempty"`
-}

@@ -49,13 +49,15 @@ type ImageConfig struct {
 	// StopSignal contains the system call signal that will be sent to the container to exit.
 	StopSignal string `json:"StopSignal,omitempty"`
 
-	// ArgsEscaped `[Deprecated]` - This field is present only for legacy
-	// compatibility with Docker and should not be used by new image builders.
-	// It is used by Docker for Windows images to indicate that the `Entrypoint`
-	// or `Cmd` or both, contains only a single element array, that is a
-	// pre-escaped, and combined into a single string `CommandLine`. If `true`
-	// the value in `Entrypoint` or `Cmd` should be used as-is to avoid double
-	// escaping.
+	// ArgsEscaped
+	//
+	// Deprecated: This field is present only for legacy compatibility with
+	// Docker and should not be used by new image builders. It is used by Docker
+	// for Windows images to indicate that the `Entrypoint` or `Cmd` or both,
+	// contains only a single element array, that is a pre-escaped, and combined
+	// into a single string `CommandLine`. If `true` the value in `Entrypoint` or
+	// `Cmd` should be used as-is to avoid double escaping.
+	// https://github.com/opencontainers/image-spec/pull/892
 	ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
 }
 
@@ -95,22 +97,8 @@ type Image struct {
 	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
 	Author string `json:"author,omitempty"`
 
-	// Architecture is the CPU architecture which the binaries in this image are built to run on.
-	Architecture string `json:"architecture"`
-
-	// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
-	Variant string `json:"variant,omitempty"`
-
-	// OS is the name of the operating system which the image is built to run on.
-	OS string `json:"os"`
-
-	// OSVersion is an optional field specifying the operating system
-	// version, for example on Windows `10.0.14393.1066`.
-	OSVersion string `json:"os.version,omitempty"`
-
-	// OSFeatures is an optional field specifying an array of strings,
-	// each listing a required OS feature (for example on Windows `win32k`).
-	OSFeatures []string `json:"os.features,omitempty"`
+	// Platform describes the platform which the image in the manifest runs on.
+	Platform
 
 	// Config defines the execution parameters which should be used as a base when running a container using the image.
 	Config ImageConfig `json:"config,omitempty"`
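The Image struct now embeds Platform instead of redeclaring its fields. Because embedded fields are promoted, existing selectors such as img.Architecture keep compiling, and the JSON wire format is unchanged since the promoted fields keep their own tags. A small illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	img := v1.Image{
		Platform: v1.Platform{Architecture: "amd64", OS: "linux"},
	}
	// Field promotion keeps the old selectors working.
	fmt.Println(img.Architecture, img.OS)
	// The marshalled JSON still carries flat "architecture"/"os" keys.
	out, _ := json.Marshal(img)
	fmt.Println(string(out))
}
```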
@@ -23,6 +23,9 @@ type Manifest struct {
 	// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
 	MediaType string `json:"mediaType,omitempty"`
 
+	// ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact.
+	ArtifactType string `json:"artifactType,omitempty"`
+
 	// Config references a configuration object for a container, by digest.
 	// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
 	Config Descriptor `json:"config"`
@@ -36,3 +39,11 @@ type Manifest struct {
 	// Annotations contains arbitrary metadata for the image manifest.
 	Annotations map[string]string `json:"annotations,omitempty"`
 }
+
+// ScratchDescriptor is the descriptor of a blob with content of `{}`.
+var ScratchDescriptor = Descriptor{
+	MediaType: MediaTypeScratch,
+	Digest:    `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`,
+	Size:      2,
+	Data:      []byte(`{}`),
+}
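The ScratchDescriptor values are easy to verify locally: the digest is simply the SHA-256 of the two-byte payload `{}`. A quick check:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	data := []byte(`{}`)
	sum := sha256.Sum256(data)
	// Prints sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a,
	// matching ScratchDescriptor.Digest; len(data) is 2, matching Size.
	fmt.Printf("sha256:%x %d\n", sum, len(data))
}
```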
@@ -40,21 +40,36 @@ const (
 
 	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
 	// the manifest but with distribution restrictions.
+	//
+	// Deprecated: Non-distributable layers are deprecated, and not recommended
+	// for future use. Implementations SHOULD NOT produce new non-distributable
+	// layers.
+	// https://github.com/opencontainers/image-spec/pull/965
 	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
 
 	// MediaTypeImageLayerNonDistributableGzip is the media type for
 	// gzipped layers referenced by the manifest but with distribution
 	// restrictions.
+	//
+	// Deprecated: Non-distributable layers are deprecated, and not recommended
+	// for future use. Implementations SHOULD NOT produce new non-distributable
+	// layers.
+	// https://github.com/opencontainers/image-spec/pull/965
 	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
 
 	// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
 	// compressed layers referenced by the manifest but with distribution
 	// restrictions.
+	//
+	// Deprecated: Non-distributable layers are deprecated, and not recommended
+	// for future use. Implementations SHOULD NOT produce new non-distributable
+	// layers.
+	// https://github.com/opencontainers/image-spec/pull/965
 	MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
 
 	// MediaTypeImageConfig specifies the media type for the image configuration.
 	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
 
-	// MediaTypeArtifactManifest specifies the media type for a content descriptor.
-	MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json"
+	// MediaTypeScratch specifies the media type for an unused blob containing the value `{}`
+	MediaTypeScratch = "application/vnd.oci.scratch.v1+json"
 )

@@ -25,7 +25,7 @@ const (
 	VersionPatch = 0
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-dev"
+	VersionDev = "-rc.3"
 )
 
 // Version is the specification version that the package types support.
@@ -1,4 +1,4 @@
-# Copyright 2013-2022 The Cobra Authors
+# Copyright 2013-2023 The Cobra Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -5,10 +5,6 @@ ifeq (, $(shell which golangci-lint))
 $(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
 endif
 
-ifeq (, $(shell which richgo))
-$(warning "could not find richgo in $(PATH), run: go install github.com/kyoh86/richgo@latest")
-endif
-
 .PHONY: fmt lint test install_deps clean
 
 default: all
@@ -25,6 +21,10 @@ lint:
 
 test: install_deps
 	$(info ******************** running tests ********************)
 	go test -v ./...
 
+richtest: install_deps
+	$(info ******************** running tests with kyoh86/richgo ********************)
+	richgo test -v ./...
+
 install_deps:
@@ -1,4 +1,4 @@
-![cobra logo](…)
+![cobra logo](…)
 
 Cobra is a library for creating powerful modern CLI applications.
 
@@ -6,7 +6,7 @@ Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
 [Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
 name a few. [This list](./projects_using_cobra.md) contains a more extensive list of projects using Cobra.
 
-[](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
+[](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
 [](https://pkg.go.dev/github.com/spf13/cobra)
 [](https://goreportcard.com/report/github.com/spf13/cobra)
 [](https://gophers.slack.com/archives/CD3LP1199)
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import (
 
 type PositionalArgs func(cmd *Command, args []string) error
 
-// Legacy arg validation has the following behaviour:
+// legacyArgs validation has the following behaviour:
 // - root commands with no subcommands can take arbitrary arguments
 // - root commands with subcommands will do subcommand validity checking
 // - subcommands will always accept arbitrary arguments
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -532,7 +532,7 @@ func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
 	}
 }
 
-// Setup annotations for go completions for registered flags
+// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags
 func prepareCustomAnnotationsForFlags(cmd *Command) {
 	flagCompletionMutex.RLock()
 	defer flagCompletionMutex.RUnlock()

@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -38,7 +38,7 @@ func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
 
 __%[1]s_debug()
 {
-    if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
+    if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
         echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
     fi
 }
@@ -65,7 +65,7 @@ __%[1]s_get_completion_results() {
     lastChar=${lastParam:$((${#lastParam}-1)):1}
     __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
 
-    if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
+    if [[ -z ${cur} && ${lastChar} != = ]]; then
         # If the last parameter is complete (there is a space following it)
         # We add an extra empty parameter so we can indicate this to the go method.
         __%[1]s_debug "Adding extra empty parameter"
@@ -75,7 +75,7 @@ __%[1]s_get_completion_results() {
     # When completing a flag with an = (e.g., %[1]s -n=<TAB>)
    # bash focuses on the part after the =, so we need to remove
    # the flag part from $cur
-    if [[ "${cur}" == -*=* ]]; then
+    if [[ ${cur} == -*=* ]]; then
        cur="${cur#*=}"
    fi
 
@@ -87,7 +87,7 @@ __%[1]s_get_completion_results() {
     directive=${out##*:}
    # Remove the directive
    out=${out%%:*}
-    if [ "${directive}" = "${out}" ]; then
+    if [[ ${directive} == "${out}" ]]; then
        # There is not directive specified
        directive=0
    fi
@@ -101,22 +101,36 @@ __%[1]s_process_completion_results() {
     local shellCompDirectiveNoFileComp=%[5]d
     local shellCompDirectiveFilterFileExt=%[6]d
     local shellCompDirectiveFilterDirs=%[7]d
+    local shellCompDirectiveKeepOrder=%[8]d
 
-    if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
+    if (((directive & shellCompDirectiveError) != 0)); then
         # Error code.  No completion.
         __%[1]s_debug "Received error from custom completion go code"
         return
     else
-        if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
-            if [[ $(type -t compopt) = "builtin" ]]; then
+        if (((directive & shellCompDirectiveNoSpace) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
                 __%[1]s_debug "Activating no space"
                 compopt -o nospace
             else
                 __%[1]s_debug "No space directive not supported in this version of bash"
             fi
         fi
-        if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
-            if [[ $(type -t compopt) = "builtin" ]]; then
+        if (((directive & shellCompDirectiveKeepOrder) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
+                # no sort isn't supported for bash less than < 4.4
+                if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
+                    __%[1]s_debug "No sort directive not supported in this version of bash"
+                else
+                    __%[1]s_debug "Activating keep order"
+                    compopt -o nosort
+                fi
+            else
+                __%[1]s_debug "No sort directive not supported in this version of bash"
+            fi
+        fi
+        if (((directive & shellCompDirectiveNoFileComp) != 0)); then
+            if [[ $(type -t compopt) == builtin ]]; then
                 __%[1]s_debug "Activating no file completion"
                 compopt +o default
             else
@@ -130,7 +144,7 @@ __%[1]s_process_completion_results() {
     local activeHelp=()
     __%[1]s_extract_activeHelp
 
-    if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
+    if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
         # File extension filtering
         local fullFilter filter filteringCmd
 
@@ -143,13 +157,12 @@ __%[1]s_process_completion_results() {
         filteringCmd="_filedir $fullFilter"
         __%[1]s_debug "File filtering command: $filteringCmd"
         $filteringCmd
-    elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
+    elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
         # File completion for directories only
 
-        # Use printf to strip any trailing newline
         local subdir
-        subdir=$(printf "%%s" "${completions[0]}")
-        if [ -n "$subdir" ]; then
+        subdir=${completions[0]}
+        if [[ -n $subdir ]]; then
             __%[1]s_debug "Listing directories in $subdir"
             pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
         else
@@ -164,7 +177,7 @@ __%[1]s_process_completion_results() {
     __%[1]s_handle_special_char "$cur" =
 
     # Print the activeHelp statements before we finish
-    if [ ${#activeHelp[*]} -ne 0 ]; then
+    if ((${#activeHelp[*]} != 0)); then
         printf "\n";
         printf "%%s\n" "${activeHelp[@]}"
         printf "\n"
@@ -184,21 +197,21 @@ __%[1]s_process_completion_results() {
 # Separate activeHelp lines from real completions.
 # Fills the $activeHelp and $completions arrays.
 __%[1]s_extract_activeHelp() {
-    local activeHelpMarker="%[8]s"
+    local activeHelpMarker="%[9]s"
     local endIndex=${#activeHelpMarker}
 
     while IFS='' read -r comp; do
-        if [ "${comp:0:endIndex}" = "$activeHelpMarker" ]; then
+        if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
             comp=${comp:endIndex}
             __%[1]s_debug "ActiveHelp found: $comp"
-            if [ -n "$comp" ]; then
+            if [[ -n $comp ]]; then
                 activeHelp+=("$comp")
             fi
         else
            # Not an activeHelp line but a normal completion
            completions+=("$comp")
        fi
-    done < <(printf "%%s\n" "${out}")
+    done <<<"${out}"
 }
 
 __%[1]s_handle_completion_types() {
@@ -254,7 +267,7 @@ __%[1]s_handle_standard_completion_case() {
     done < <(printf "%%s\n" "${completions[@]}")
 
     # If there is a single completion left, remove the description text
-    if [ ${#COMPREPLY[*]} -eq 1 ]; then
+    if ((${#COMPREPLY[*]} == 1)); then
         __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
         comp="${COMPREPLY[0]%%%%$tab*}"
         __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
@@ -271,8 +284,8 @@ __%[1]s_handle_special_char()
     if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
         local word=${comp%%"${comp##*${char}}"}
         local idx=${#COMPREPLY[*]}
-        while [[ $((--idx)) -ge 0 ]]; do
-            COMPREPLY[$idx]=${COMPREPLY[$idx]#"$word"}
+        while ((--idx >= 0)); do
+            COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
         done
     fi
 }
@@ -298,7 +311,7 @@ __%[1]s_format_comp_descriptions()
 
     # Make sure we can fit a description of at least 8 characters
    # if we are to align the descriptions.
-    if [[ $maxdesclength -gt 8 ]]; then
+    if ((maxdesclength > 8)); then
        # Add the proper number of spaces to align the descriptions
        for ((i = ${#comp} ; i < longest ; i++)); do
            comp+=" "
@@ -310,8 +323,8 @@ __%[1]s_format_comp_descriptions()
 
        # If there is enough space for any description text,
        # truncate the descriptions that are too long for the shell width
-        if [ $maxdesclength -gt 0 ]; then
-            if [ ${#desc} -gt $maxdesclength ]; then
+        if ((maxdesclength > 0)); then
+            if ((${#desc} > maxdesclength)); then
                desc=${desc:0:$(( maxdesclength - 1 ))}
                desc+="…"
            fi
@@ -332,9 +345,9 @@ __start_%[1]s()
     # Call _init_completion from the bash-completion package
    # to prepare the arguments properly
    if declare -F _init_completion >/dev/null 2>&1; then
-        _init_completion -n "=:" || return
+        _init_completion -n =: || return
    else
-        __%[1]s_init_completion -n "=:" || return
+        __%[1]s_init_completion -n =: || return
    fi
 
    __%[1]s_debug
@@ -361,7 +374,7 @@ fi
 # ex: ts=4 sw=4 et filetype=sh
 `, name, compCmd,
 	ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
-	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
 	activeHelpMarker))
 }
 
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -167,8 +167,8 @@ func appendIfNotPresent(s, stringToAppend string) string {
 
 // rpad adds padding to the right of a string.
 func rpad(s string, padding int) string {
-	template := fmt.Sprintf("%%-%ds", padding)
-	return fmt.Sprintf(template, s)
+	formattedString := fmt.Sprintf("%%-%ds", padding)
+	return fmt.Sprintf(formattedString, s)
 }
 
 // tmpl executes the given template text on data, writing the result to w.
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@ const FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra"
 // FParseErrWhitelist configures Flag parse errors to be ignored
 type FParseErrWhitelist flag.ParseErrorsWhitelist
 
-// Structure to manage groups for commands
+// Group Structure to manage groups for commands
 type Group struct {
 	ID    string
 	Title string
@@ -47,7 +47,7 @@ type Group struct {
 // definition to ensure usability.
 type Command struct {
 	// Use is the one-line usage message.
-	// Recommended syntax is as follow:
+	// Recommended syntax is as follows:
 	//   [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required.
 	//   ... indicates that you can specify multiple values for the previous argument.
 	//   |   indicates mutually exclusive information. You can use the argument to the left of the separator or the
@@ -321,7 +321,7 @@ func (c *Command) SetHelpCommand(cmd *Command) {
 	c.helpCommand = cmd
 }
 
-// SetHelpCommandGroup sets the group id of the help command.
+// SetHelpCommandGroupID sets the group id of the help command.
 func (c *Command) SetHelpCommandGroupID(groupID string) {
 	if c.helpCommand != nil {
 		c.helpCommand.GroupID = groupID
@@ -330,7 +330,7 @@ func (c *Command) SetHelpCommandGroupID(groupID string) {
 	c.helpCommandGroupID = groupID
 }
 
-// SetCompletionCommandGroup sets the group id of the completion command.
+// SetCompletionCommandGroupID sets the group id of the completion command.
 func (c *Command) SetCompletionCommandGroupID(groupID string) {
 	// completionCommandGroupID is used if no completion command is defined by the user
 	c.Root().completionCommandGroupID = groupID
@@ -655,20 +655,44 @@ Loop:
 
 // argsMinusFirstX removes only the first x from args. Otherwise, commands that look like
 // openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
-func argsMinusFirstX(args []string, x string) []string {
-	for i, y := range args {
-		if x == y {
-			ret := []string{}
-			ret = append(ret, args[:i]...)
-			ret = append(ret, args[i+1:]...)
-			return ret
+// Special care needs to be taken not to remove a flag value.
+func (c *Command) argsMinusFirstX(args []string, x string) []string {
+	if len(args) == 0 {
+		return args
+	}
+	c.mergePersistentFlags()
+	flags := c.Flags()
+
+Loop:
+	for pos := 0; pos < len(args); pos++ {
+		s := args[pos]
+		switch {
+		case s == "--":
+			// -- means we have reached the end of the parseable args. Break out of the loop now.
+			break Loop
+		case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+			fallthrough
+		case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+			// This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip
+			// over the next arg, because that is the value of this flag.
+			pos++
+			continue
+		case !strings.HasPrefix(s, "-"):
+			// This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so,
+			// return the args, excluding the one at this position.
+			if s == x {
+				ret := []string{}
+				ret = append(ret, args[:pos]...)
+				ret = append(ret, args[pos+1:]...)
+				return ret
+			}
 		}
 	}
 	return args
 }
 
 func isFlagArg(arg string) bool {
-	return ((len(arg) >= 3 && arg[1] == '-') ||
+	return ((len(arg) >= 3 && arg[0:2] == "--") ||
 		(len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
 }
 
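The rewrite exists because the old scan could delete a flag's value whenever that value happened to equal the subcommand name. A reduced, self-contained contrast of the two behaviors (the real method consults the command's flag set via hasNoOptDefVal/shortHasNoOptDefVal; here flag-awareness is simplified to "a flag without an '=' consumes the next token"):

```go
package main

import (
	"fmt"
	"strings"
)

// naive mirrors the removed implementation: drop the first literal match.
func naive(args []string, x string) []string {
	for i, y := range args {
		if x == y {
			return append(append([]string{}, args[:i]...), args[i+1:]...)
		}
	}
	return args
}

// flagAware mirrors the shape of the new implementation.
func flagAware(args []string, x string) []string {
	for pos := 0; pos < len(args); pos++ {
		s := args[pos]
		switch {
		case s == "--":
			return args // end of parseable args
		case strings.HasPrefix(s, "-") && !strings.Contains(s, "="):
			pos++ // skip this flag's value
		case s == x:
			return append(append([]string{}, args[:pos]...), args[pos+1:]...)
		}
	}
	return args
}

func main() {
	// A namespace that happens to be called "pods":
	args := []string{"--namespace", "pods", "get", "pods"}
	fmt.Println(naive(args, "pods"))     // [--namespace get pods]: the flag value was stripped
	fmt.Println(flagAware(args, "pods")) // [--namespace pods get]: the subcommand was stripped
}
```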
@@ -686,7 +710,7 @@ func (c *Command) Find(args []string) (*Command, []string, error) {
 
 		cmd := c.findNext(nextSubCmd)
 		if cmd != nil {
-			return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+			return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd))
 		}
 		return c, innerArgs
 	}
@@ -1272,7 +1296,7 @@ func (c *Command) AllChildCommandsHaveGroup() bool {
 	return true
 }
 
-// ContainGroups return if groupID exists in the list of command groups.
+// ContainsGroup return if groupID exists in the list of command groups.
 func (c *Command) ContainsGroup(groupID string) bool {
 	for _, x := range c.commandgroups {
 		if x.ID == groupID {
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -77,6 +77,10 @@ const (
 	// obtain the same behavior but only for flags.
 	ShellCompDirectiveFilterDirs
 
+	// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+	// in which the completions are provided
+	ShellCompDirectiveKeepOrder
+
 	// ===========================================================================
 
 	// All directives using iota should be above this one.
@@ -159,6 +163,9 @@ func (d ShellCompDirective) string() string {
 	if d&ShellCompDirectiveFilterDirs != 0 {
 		directives = append(directives, "ShellCompDirectiveFilterDirs")
 	}
+	if d&ShellCompDirectiveKeepOrder != 0 {
+		directives = append(directives, "ShellCompDirectiveKeepOrder")
+	}
 	if len(directives) == 0 {
 		directives = append(directives, "ShellCompDirectiveDefault")
 	}
@@ -169,7 +176,7 @@ func (d ShellCompDirective) string() string {
 	return strings.Join(directives, ", ")
 }
 
-// Adds a special hidden command that can be used to request custom completions.
+// initCompleteCmd adds a special hidden command that can be used to request custom completions.
 func (c *Command) initCompleteCmd(args []string) {
 	completeCmd := &Command{
 		Use:    fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
@@ -727,7 +734,7 @@ to enable it. You can execute the following once:
 
 To load completions in your current shell session:
 
-	source <(%[1]s completion zsh); compdef _%[1]s %[1]s
+	source <(%[1]s completion zsh)
 
 To load completions for every new session, execute once:
 
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -53,7 +53,7 @@ function __%[1]s_perform_completion
     __%[1]s_debug "last arg: $lastArg"
 
     # Disable ActiveHelp which is not supported for fish shell
-    set -l requestComp "%[9]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
+    set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
 
     __%[1]s_debug "Calling $requestComp"
     set -l results (eval $requestComp 2> /dev/null)
@@ -89,6 +89,60 @@ function __%[1]s_perform_completion
     printf "%%s\n" "$directiveLine"
 end
 
+# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result
+function __%[1]s_perform_completion_once
+    __%[1]s_debug "Starting __%[1]s_perform_completion_once"
+
+    if test -n "$__%[1]s_perform_completion_once_result"
+        __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
+        return 0
+    end
+
+    set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
+    if test -z "$__%[1]s_perform_completion_once_result"
+        __%[1]s_debug "No completions, probably due to a failure"
+        return 1
+    end
+
+    __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
+    return 0
+end
+
+# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
+function __%[1]s_clear_perform_completion_once_result
+    __%[1]s_debug ""
+    __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
+    set --erase __%[1]s_perform_completion_once_result
+    __%[1]s_debug "Succesfully erased the variable __%[1]s_perform_completion_once_result"
+end
+
+function __%[1]s_requires_order_preservation
+    __%[1]s_debug ""
+    __%[1]s_debug "========= checking if order preservation is required =========="
+
+    __%[1]s_perform_completion_once
+    if test -z "$__%[1]s_perform_completion_once_result"
+        __%[1]s_debug "Error determining if order preservation is required"
+        return 1
+    end
+
+    set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+    __%[1]s_debug "Directive is: $directive"
+
+    set -l shellCompDirectiveKeepOrder %[9]d
+    set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
+    __%[1]s_debug "Keeporder is: $keeporder"
+
+    if test $keeporder -ne 0
+        __%[1]s_debug "This does require order preservation"
+        return 0
+    end
+
+    __%[1]s_debug "This doesn't require order preservation"
+    return 1
+end
+
+
 # This function does two things:
 # - Obtain the completions and store them in the global __%[1]s_comp_results
 # - Return false if file completion should be performed
@@ -99,17 +153,17 @@ function __%[1]s_prepare_completions
     # Start fresh
     set --erase __%[1]s_comp_results
 
-    set -l results (__%[1]s_perform_completion)
-    __%[1]s_debug "Completion results: $results"
+    __%[1]s_perform_completion_once
+    __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
 
-    if test -z "$results"
+    if test -z "$__%[1]s_perform_completion_once_result"
        __%[1]s_debug "No completion, probably due to a failure"
        # Might as well do file completion, in case it helps
        return 1
    end
 
-    set -l directive (string sub --start 2 $results[-1])
-    set --global __%[1]s_comp_results $results[1..-2]
+    set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
+    set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
 
    __%[1]s_debug "Completions are: $__%[1]s_comp_results"
    __%[1]s_debug "Directive is: $directive"
@@ -205,13 +259,17 @@ end
 # Remove any pre-existing completions for the program since we will be handling all of them.
 complete -c %[2]s -e
 
+# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
+complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
 # The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
 # which provides the program's completion choices.
-complete -c %[2]s -n '__%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# If this doesn't require order preservation, we don't use the -k flag
+complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
+# otherwise we use the -k flag
+complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
 
 `, nameForVar, name, compCmd,
 	ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
-	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
 }
 
 // GenFishCompletion generates fish completion file and writes to the passed writer.
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -77,6 +77,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
     $ShellCompDirectiveNoFileComp=%[6]d
     $ShellCompDirectiveFilterFileExt=%[7]d
     $ShellCompDirectiveFilterDirs=%[8]d
+    $ShellCompDirectiveKeepOrder=%[9]d
 
     # Prepare the command to request completions for the program.
     # Split the command at the first space to separate the program and arguments.
@@ -106,13 +107,22 @@ filter __%[1]s_escapeStringWithSpecialChars {
         # If the last parameter is complete (there is a space following it)
         # We add an extra empty parameter so we can indicate this to the go method.
         __%[1]s_debug "Adding extra empty parameter"
-`+"        # We need to use `\"`\" to pass an empty argument a \"\" or '' does not work!!!"+`
-`+"        $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+        # PowerShell 7.2+ changed the way how the arguments are passed to executables,
+        # so for pre-7.2 or when Legacy argument passing is enabled we need to use
+`+"        # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
+        if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
+            ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
+            (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
+            $PSNativeCommandArgumentPassing -eq 'Legacy')) {
+`+"            $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
+        } else {
+            $RequestComp="$RequestComp" + ' ""'
+        }
     }
 
     __%[1]s_debug "Calling $RequestComp"
     # First disable ActiveHelp which is not supported for Powershell
-    $env:%[9]s=0
+    $env:%[10]s=0
 
     #call the command store the output in $out and redirect stderr and stdout to null
     # $Out is an array contains each line per element
@@ -137,7 +147,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
     }
 
     $Longest = 0
-    $Values = $Out | ForEach-Object {
+    [Array]$Values = $Out | ForEach-Object {
         #Split the output in name and description
`+"        $Name, $Description = $_.Split(\"`t\",2)"+`
         __%[1]s_debug "Name: $Name Description: $Description"
@@ -182,6 +192,11 @@ filter __%[1]s_escapeStringWithSpecialChars {
         }
     }
 
+    # we sort the values in ascending order by name if keep order isn't passed
+    if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
+        $Values = $Values | Sort-Object -Property Name
+    }
+
     if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
         __%[1]s_debug "ShellCompDirectiveNoFileComp is called"
 
@@ -267,7 +282,7 @@ filter __%[1]s_escapeStringWithSpecialChars {
 Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock $__%[2]sCompleterBlock
 `, name, nameForVar, compCmd,
 	ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
-	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
+	ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
 }
 
 func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
@@ -1,11 +1,13 @@
 ## Projects using Cobra
 
+- [Allero](https://github.com/allero-io/allero)
 - [Arewefastyet](https://benchmark.vitess.io)
 - [Arduino CLI](https://github.com/arduino/arduino-cli)
 - [Bleve](https://blevesearch.com/)
+- [Cilium](https://cilium.io/)
 - [CloudQuery](https://github.com/cloudquery/cloudquery)
 - [CockroachDB](https://www.cockroachlabs.com/)
 - [Constellation](https://github.com/edgelesssys/constellation)
 - [Cosmos SDK](https://github.com/cosmos/cosmos-sdk)
 - [Datree](https://github.com/datreeio/datree)
 - [Delve](https://github.com/derekparker/delve)
@@ -25,7 +27,7 @@
 - [Istio](https://istio.io)
 - [Kool](https://github.com/kool-dev/kool)
 - [Kubernetes](https://kubernetes.io/)
-- [Kubescape](https://github.com/armosec/kubescape)
+- [Kubescape](https://github.com/kubescape/kubescape)
 - [KubeVirt](https://github.com/kubevirt/kubevirt)
 - [Linkerd](https://linkerd.io/)
 - [Mattermost-server](https://github.com/mattermost/mattermost-server)
@@ -51,10 +53,12 @@
 - [Random](https://github.com/erdaltsksn/random)
 - [Rclone](https://rclone.org/)
 - [Scaleway CLI](https://github.com/scaleway/scaleway-cli)
+- [Sia](https://github.com/SiaFoundation/siad)
 - [Skaffold](https://skaffold.dev/)
 - [Tendermint](https://github.com/tendermint/tendermint)
 - [Twitch CLI](https://github.com/twitchdev/twitch-cli)
 - [UpCloud CLI (`upctl`)](https://github.com/UpCloudLtd/upcloud-cli)
 - [Vitess](https://vitess.io)
 - VMware's [Tanzu Community Edition](https://github.com/vmware-tanzu/community-edition) & [Tanzu Framework](https://github.com/vmware-tanzu/tanzu-framework)
 - [Werf](https://werf.io/)
+- [ZITADEL](https://github.com/zitadel/zitadel)
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -71,7 +71,7 @@ PowerShell:
 `,cmd.Root().Name()),
 	DisableFlagsInUseLine: true,
 	ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
-	Args:                  cobra.ExactValidArgs(1),
+	Args:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
 	Run: func(cmd *cobra.Command, args []string) {
 		switch args[0] {
 		case "bash":
@@ -162,16 +162,7 @@ cmd := &cobra.Command{
 }
 ```
 
-The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
-the completion algorithm if entered manually, e.g. in:
-
-```bash
-$ kubectl get rc [tab][tab]
-backend frontend database
-```
-
-Note that without declaring `rc` as an alias, the completion algorithm would not know to show the list of
-replication controllers following `rc`.
+The aliases are shown to the user on tab completion only if no completions were found within sub-commands or `ValidArgs`.
 
 ### Dynamic completion of nouns
 
@@ -237,6 +228,10 @@ ShellCompDirectiveFilterFileExt
 // return []string{"themes"}, ShellCompDirectiveFilterDirs
 //
 ShellCompDirectiveFilterDirs
+
+// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
+// in which the completions are provided
+ShellCompDirectiveKeepOrder
 ```
 
 ***Note***: When using the `ValidArgsFunction`, Cobra will call your registered function after having parsed all flags and arguments provided in the command-line. You therefore don't need to do this parsing yourself. For example, when a user calls `helm status --namespace my-rook-ns [tab][tab]`, Cobra will call your registered `ValidArgsFunction` after having parsed the `--namespace` flag, as it would have done when calling the `RunE` function.
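For example, a ValidArgsFunction can combine the new directive with others to present completions in a meaningful, non-alphabetical order (the command and values below are illustrative):

```go
cmd := &cobra.Command{
	Use: "deploy [stage]",
	ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		// Keep pipeline order instead of letting the shell sort.
		return []string{"dev", "staging", "prod"},
			cobra.ShellCompDirectiveKeepOrder | cobra.ShellCompDirectiveNoFileComp
	},
}
```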
@@ -385,6 +380,19 @@ or
 ```go
 ValidArgs: []string{"bash\tCompletions for bash", "zsh\tCompletions for zsh"}
 ```
+
+If you don't want to show descriptions in the completions, you can add `--no-descriptions` to the default `completion` command to disable them, like:
+
+```bash
+$ source <(helm completion bash)
+$ helm completion [tab][tab]
+bash        (generate autocompletion script for bash)        powershell  (generate autocompletion script for powershell)
+fish        (generate autocompletion script for fish)        zsh         (generate autocompletion script for zsh)
+
+$ source <(helm completion bash --no-descriptions)
+$ helm completion [tab][tab]
+bash  fish  powershell  zsh
+```
 ## Bash completions
 
 ### Dependencies
@@ -188,6 +188,37 @@ var versionCmd = &cobra.Command{
 }
 ```
 
+### Organizing subcommands
+
+A command may have subcommands which in turn may have other subcommands. This is achieved by using
+`AddCommand`. In some cases, especially in larger applications, each subcommand may be defined in
+its own go package.
+
+The suggested approach is for the parent command to use `AddCommand` to add its most immediate
+subcommands. For example, consider the following directory structure:
+
+```text
+├── cmd
+│   ├── root.go
+│   └── sub1
+│       ├── sub1.go
+│       └── sub2
+│           ├── leafA.go
+│           ├── leafB.go
+│           └── sub2.go
+└── main.go
+```
+
+In this case:
+
+* The `init` function of `root.go` adds the command defined in `sub1.go` to the root command.
+* The `init` function of `sub1.go` adds the command defined in `sub2.go` to the sub1 command.
+* The `init` function of `sub2.go` adds the commands defined in `leafA.go` and `leafB.go` to the
+  sub2 command.
+
+This approach ensures the subcommands are always included at compile time while avoiding cyclic
+references.
+
### Returning and handling errors
|
||||
|
||||
If you wish to return an error to the caller of a command, `RunE` can be used.
|
||||
|
|
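A minimal sketch of the layout the hunk above describes; the module path `example.com/app` is a placeholder, and the `RunE` variant mentioned in the trailing context is used so the leaf command reports errors to its caller:

```go
// cmd/root.go
package cmd

import (
	"example.com/app/cmd/sub1" // hypothetical module path

	"github.com/spf13/cobra"
)

var rootCmd = &cobra.Command{Use: "app"}

func init() {
	// The parent adds only its most immediate subcommand.
	rootCmd.AddCommand(sub1.Cmd)
}

// Execute is called from main.go.
func Execute() error { return rootCmd.Execute() }
```

```go
// cmd/sub1/sub1.go
package sub1

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Cmd is exported so cmd/root.go can attach it.
var Cmd = &cobra.Command{
	Use: "sub1",
	// RunE returns the error to the caller instead of handling it here.
	RunE: func(cmd *cobra.Command, args []string) error {
		if len(args) == 0 {
			return fmt.Errorf("sub1 requires at least one argument")
		}
		return nil
	},
}
```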
@@ -313,8 +344,8 @@ rootCmd.MarkFlagsRequiredTogether("username", "password")
 You can also prevent different flags from being provided together if they represent mutually
 exclusive options such as specifying an output format as either `--json` or `--yaml` but never both:
 ```go
-rootCmd.Flags().BoolVar(&u, "json", false, "Output in JSON")
-rootCmd.Flags().BoolVar(&pw, "yaml", false, "Output in YAML")
+rootCmd.Flags().BoolVar(&ofJson, "json", false, "Output in JSON")
+rootCmd.Flags().BoolVar(&ofYaml, "yaml", false, "Output in YAML")
 rootCmd.MarkFlagsMutuallyExclusive("json", "yaml")
 ```
 
@@ -349,7 +380,7 @@ shown below:
 ```go
 var cmd = &cobra.Command{
 	Short: "hello",
-	Args: MatchAll(ExactArgs(2), OnlyValidArgs),
+	Args: cobra.MatchAll(cobra.ExactArgs(2), cobra.OnlyValidArgs),
 	Run: func(cmd *cobra.Command, args []string) {
 		fmt.Println("Hello, World!")
 	},
 
@@ -1,4 +1,4 @@
-// Copyright 2013-2022 The Cobra Authors
+// Copyright 2013-2023 The Cobra Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 
@@ -90,6 +90,7 @@ func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
 		compCmd = ShellCompNoDescRequestCmd
 	}
 	WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
+compdef _%[1]s %[1]s
 
 # zsh completion for %-36[1]s -*- shell-script -*-
 

@@ -108,8 +109,9 @@ _%[1]s()
     local shellCompDirectiveNoFileComp=%[5]d
     local shellCompDirectiveFilterFileExt=%[6]d
     local shellCompDirectiveFilterDirs=%[7]d
+    local shellCompDirectiveKeepOrder=%[8]d
 
-    local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace
+    local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
     local -a completions
 
     __%[1]s_debug "\n========= starting completion logic =========="
 

@@ -177,7 +179,7 @@ _%[1]s()
         return
     fi
 
-    local activeHelpMarker="%[8]s"
+    local activeHelpMarker="%[9]s"
     local endIndex=${#activeHelpMarker}
     local startIndex=$((${#activeHelpMarker}+1))
     local hasActiveHelp=0
 

@@ -227,6 +229,11 @@ _%[1]s()
         noSpace="-S ''"
     fi
 
+    if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
+        __%[1]s_debug "Activating keep order."
+        keepOrder="-V"
+    fi
+
     if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
         # File extension filtering
         local filteringCmd
 

@@ -262,7 +269,7 @@ _%[1]s()
         return $result
     else
         __%[1]s_debug "Calling _describe"
-        if eval _describe "completions" completions $flagPrefix $noSpace; then
+        if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
             __%[1]s_debug "_describe found some completions"
 
             # Return the success of having called _describe
 

@@ -296,6 +303,6 @@ if [ "$funcstack[1]" = "_%[1]s" ]; then
 fi
 `, name, compCmd,
 		ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
-		ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs,
+		ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
 		activeHelpMarker))
 }
 
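The `shellCompDirectiveKeepOrder` plumbing added above is driven from the Go side by returning the new directive from a completion function. A minimal sketch (the command and candidate list are hypothetical; the directive constant is Cobra's real API):

```go
package main

import "github.com/spf13/cobra"

func main() {
	deployCmd := &cobra.Command{
		Use: "deploy [step]",
		// Candidates are returned in pipeline order; KeepOrder asks the
		// shell (zsh uses _describe -V, as patched above) not to sort them.
		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"build", "push", "rollout"}, cobra.ShellCompDirectiveKeepOrder
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}
	_ = deployCmd.Execute()
}
```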
@@ -41,7 +41,7 @@ type fileReader interface {
 // RawBytes accesses the raw bytes of the archive, apart from the file payload itself.
 // This includes the header and padding.
 //
-// This call resets the current rawbytes buffer
+// # This call resets the current rawbytes buffer
 //
 // Only when RawAccounting is enabled, otherwise this returns nil
 func (tr *Reader) RawBytes() []byte {
 
@@ -126,7 +126,9 @@ func (tr *Reader) next() (*Header, error) {
 				return nil, err
 			}
 			if hdr.Typeflag == TypeXGlobalHeader {
-				mergePAX(hdr, paxHdrs)
+				if err = mergePAX(hdr, paxHdrs); err != nil {
+					return nil, err
+				}
 				return &Header{
 					Name:     hdr.Name,
 					Typeflag: hdr.Typeflag,
 
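The fix above stops a failed `mergePAX` from being silently ignored, so a malformed PAX global header now surfaces as an error from `Next()`. In consumer terms the behavior looks like this standalone sketch against the standard library's equivalent reader (the input bytes are a hypothetical garbled archive):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

func main() {
	// A garbled stream stands in for an archive whose headers fail to parse.
	tr := tar.NewReader(bytes.NewReader([]byte("not a tar archive")))
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Parse failures are reported to the caller rather than
			// being dropped mid-iteration.
			fmt.Println("read error:", err)
			return
		}
		fmt.Println(hdr.Name)
	}
}
```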
@@ -381,9 +383,9 @@ func parsePAX(r io.Reader) (map[string]string, error) {
 // header in case further processing is required.
 //
 // The err will be set to io.EOF only when one of the following occurs:
-// * Exactly 0 bytes are read and EOF is hit.
-// * Exactly 1 block of zeros is read and EOF is hit.
-// * At least 2 blocks of zeros are read.
+//   - Exactly 0 bytes are read and EOF is hit.
+//   - Exactly 1 block of zeros is read and EOF is hit.
+//   - At least 2 blocks of zeros are read.
 func (tr *Reader) readHeader() (*Header, *block, error) {
 	// Two blocks of zero bytes marks the end of the archive.
 	n, err := io.ReadFull(tr.r, tr.blk[:])
 
@@ -71,6 +71,8 @@ func WriteOutputTarStream(fg storage.FileGetter, up storage.Unpacker, w io.Write
 		crcSum = make([]byte, 8)
 		multiWriter = io.MultiWriter(w, crcHash)
 		copyBuffer = byteBufferPool.Get().([]byte)
+		// TODO once we have some benchmark or memory profile then we can experiment with using *bytes.Buffer
+		//nolint:staticcheck // SA6002 not going to do a pointer here
 		defer byteBufferPool.Put(copyBuffer)
 	} else {
 		crcHash.Reset()
 
@@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
 	return out.Bytes(), err
 }
 
-// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
-// in the same executable. This function cannot import data from
+// IImportShallow decodes "shallow" types.Package data encoded by
+// IExportShallow in the same executable. This function cannot import data from
 // cmd/compile or gcexportdata.Write.
-func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
+func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) {
 	const bundle = false
-	pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
+	pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert)
 	if err != nil {
 		return nil, err
 	}
 
@@ -85,7 +85,7 @@ const (
 // If the export data version is not recognized or the format is otherwise
 // compromised, an error is returned.
 func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
-	pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
+	pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil)
 	if err != nil {
 		return 0, nil, err
 	}
 

@@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
 
 // IImportBundle imports a set of packages from the serialized package bundle.
 func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
-	return iimportCommon(fset, imports, data, true, "", nil)
+	return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil)
 }
 
-func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
+// A GetPackageFunc is a function that gets the package with the given path
+// from the importer state, creating it (with the specified name) if necessary.
+// It is an abstraction of the map historically used to memoize package creation.
+//
+// Two calls with the same path must return the same package.
+//
+// If the given getPackage func returns nil, the import will fail.
+type GetPackageFunc = func(path, name string) *types.Package
+
+// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the
+// given map of package path -> package.
+//
+// The resulting func may mutate m: if a requested package is not found, a new
+// package will be inserted into m.
+func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc {
+	return func(path, name string) *types.Package {
+		if _, ok := m[path]; !ok {
+			m[path] = types.NewPackage(path, name)
+		}
+		return m[path]
+	}
+}
+
+func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
 	const currentVersion = iexportVersionCurrent
 	version := int64(-1)
 	if !debug {
 

@@ -195,10 +218,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 	if pkgPath == "" {
 		pkgPath = path
 	}
-	pkg := imports[pkgPath]
+	pkg := getPackage(pkgPath, pkgName)
 	if pkg == nil {
-		pkg = types.NewPackage(pkgPath, pkgName)
-		imports[pkgPath] = pkg
+		errorf("internal error: getPackage returned nil package for %s", pkgPath)
 	} else if pkg.Name() != pkgName {
 		errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
 	}
 
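Since `gcimporter` is internal to x/tools and cannot be imported directly, here is an illustrative standalone sketch of the `GetPackageFunc` contract introduced above, using only `go/types`; the package path and name are hypothetical:

```go
package main

import (
	"fmt"
	"go/types"
)

// GetPackageFunc mirrors the alias introduced in the hunk above.
type GetPackageFunc = func(path, name string) *types.Package

func main() {
	m := make(map[string]*types.Package)
	// The same behavior as GetPackageFromMap, written out explicitly.
	var getPackage GetPackageFunc = func(path, name string) *types.Package {
		if _, ok := m[path]; !ok {
			m[path] = types.NewPackage(path, name)
		}
		return m[path] // same path always yields the same *types.Package
	}
	a := getPackage("example.com/foo", "foo")
	b := getPackage("example.com/foo", "foo")
	fmt.Println(a == b) // true
}
```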
@@ -7,7 +7,9 @@
 package tokeninternal
 
 import (
+	"fmt"
 	"go/token"
+	"sort"
 	"sync"
 	"unsafe"
 )
 
@@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {
 		panic("unexpected token.File size")
 	}
 }
+
+// AddExistingFiles adds the specified files to the FileSet if they
+// are not already present. It panics if any pair of files in the
+// resulting FileSet would overlap.
+func AddExistingFiles(fset *token.FileSet, files []*token.File) {
+	// Punch through the FileSet encapsulation.
+	type tokenFileSet struct {
+		// This type remained essentially consistent from go1.16 to go1.21.
+		mutex sync.RWMutex
+		base  int
+		files []*token.File
+		_     *token.File // changed to atomic.Pointer[token.File] in go1.19
+	}
+
+	// If the size of token.FileSet changes, this will fail to compile.
+	const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
+	var _ [-delta * delta]int
+
+	type uP = unsafe.Pointer
+	var ptr *tokenFileSet
+	*(*uP)(uP(&ptr)) = uP(fset)
+	ptr.mutex.Lock()
+	defer ptr.mutex.Unlock()
+
+	// Merge and sort.
+	newFiles := append(ptr.files, files...)
+	sort.Slice(newFiles, func(i, j int) bool {
+		return newFiles[i].Base() < newFiles[j].Base()
+	})
+
+	// Reject overlapping files.
+	// Discard adjacent identical files.
+	out := newFiles[:0]
+	for i, file := range newFiles {
+		if i > 0 {
+			prev := newFiles[i-1]
+			if file == prev {
+				continue
+			}
+			if prev.Base()+prev.Size()+1 > file.Base() {
+				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
+					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
+					file.Name(), file.Base(), file.Base()+file.Size()))
+			}
+		}
+		out = append(out, file)
+	}
+	newFiles = out
+
+	ptr.files = newFiles
+
+	// Advance FileSet.Base().
+	if len(newFiles) > 0 {
+		last := newFiles[len(newFiles)-1]
+		newBase := last.Base() + last.Size() + 1
+		if ptr.base < newBase {
+			ptr.base = newBase
+		}
+	}
+}
+
+// FileSetFor returns a new FileSet containing a sequence of new Files with
+// the same base, size, and line as the input files, for use in APIs that
+// require a FileSet.
+//
+// Precondition: the input files must be non-overlapping, and sorted in order
+// of their Base.
+func FileSetFor(files ...*token.File) *token.FileSet {
+	fset := token.NewFileSet()
+	for _, f := range files {
+		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
+		lines := GetLines(f)
+		f2.SetLines(lines)
+	}
+	return fset
+}
+
+// CloneFileSet creates a new FileSet holding all files in fset. It does not
+// create copies of the token.Files in fset: they are added to the resulting
+// FileSet unmodified.
+func CloneFileSet(fset *token.FileSet) *token.FileSet {
+	var files []*token.File
+	fset.Iterate(func(f *token.File) bool {
+		files = append(files, f)
+		return true
+	})
+	newFileSet := token.NewFileSet()
+	AddExistingFiles(newFileSet, files)
+	return newFileSet
+}
 
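For intuition about what `CloneFileSet`/`FileSetFor` are doing above, a standalone sketch using only the standard `go/token` API (line tables are omitted here, since reading them portably is exactly what the unsafe `GetLines` trick exists for):

```go
package main

import (
	"fmt"
	"go/token"
)

// cloneFileSet rebuilds a FileSet with files of the same name, base, and
// size; the real helper also copies each file's line table.
func cloneFileSet(fset *token.FileSet) *token.FileSet {
	clone := token.NewFileSet()
	fset.Iterate(func(f *token.File) bool {
		// AddFile panics on base/size conflicts, mirroring the
		// overlap check in AddExistingFiles.
		clone.AddFile(f.Name(), f.Base(), f.Size())
		return true
	})
	return clone
}

func main() {
	fset := token.NewFileSet()
	fset.AddFile("a.go", fset.Base(), 100)
	fset.AddFile("b.go", fset.Base(), 50)
	fmt.Println(cloneFileSet(fset).Base() == fset.Base()) // true
}
```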
@@ -4,7 +4,7 @@ cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.19.0
+# cloud.google.com/go/compute v1.19.1
 ## explicit; go 1.19
 cloud.google.com/go/compute/internal
 # cloud.google.com/go/compute/metadata v0.2.3
 
@@ -59,10 +59,12 @@ github.com/Azure/go-autorest/logger
 # github.com/Azure/go-autorest/tracing v0.6.0
 ## explicit; go 1.12
 github.com/Azure/go-autorest/tracing
-# github.com/Microsoft/go-winio v0.6.0
+# github.com/Microsoft/go-winio v0.6.1
 ## explicit; go 1.17
 github.com/Microsoft/go-winio
+github.com/Microsoft/go-winio/internal/fs
 github.com/Microsoft/go-winio/internal/socket
+github.com/Microsoft/go-winio/internal/stringbuffer
 github.com/Microsoft/go-winio/pkg/guid
 # github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f
 ## explicit; go 1.13
 
@@ -292,7 +294,7 @@ github.com/coreos/go-systemd/v22/dbus
 # github.com/dimchansky/utfbom v1.1.1
 ## explicit
 github.com/dimchansky/utfbom
-# github.com/docker/cli v23.0.1+incompatible
+# github.com/docker/cli v23.0.5+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
 
@@ -504,7 +506,7 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.14.0
+# github.com/google/go-containerregistry v0.15.1
 ## explicit; go 1.18
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
 
@@ -606,7 +608,7 @@ github.com/karrick/godirwalk
 # github.com/kevinburke/ssh_config v1.1.0
 ## explicit
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.16.0
+# github.com/klauspost/compress v1.16.5
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 
@@ -667,7 +669,7 @@ github.com/moby/sys/signal
 # github.com/moby/sys/symlink v0.2.0
 ## explicit; go 1.16
 github.com/moby/sys/symlink
-# github.com/moby/term v0.0.0-20221120202655-abb19827d345
+# github.com/moby/term v0.0.0-20221205130635-1aeaba878587
 ## explicit; go 1.18
 github.com/moby/term
 github.com/moby/term/windows
 
@@ -678,8 +680,8 @@ github.com/morikuni/aec
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
 github.com/opencontainers/go-digest/digestset
-# github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
-## explicit; go 1.17
+# github.com/opencontainers/image-spec v1.1.0-rc3
+## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
 # github.com/opencontainers/runc v1.1.5
 
@@ -736,7 +738,7 @@ github.com/sirupsen/logrus
 github.com/spf13/afero
 github.com/spf13/afero/internal/common
 github.com/spf13/afero/mem
-# github.com/spf13/cobra v1.6.1
+# github.com/spf13/cobra v1.7.0
 ## explicit; go 1.15
 github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 
@@ -746,7 +748,7 @@ github.com/spf13/pflag
 ## explicit; go 1.18
 github.com/tonistiigi/fsutil
 github.com/tonistiigi/fsutil/types
-# github.com/vbatts/tar-split v0.11.2
+# github.com/vbatts/tar-split v0.11.3
 ## explicit; go 1.15
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 
@@ -798,7 +800,7 @@ golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/mod v0.9.0
+# golang.org/x/mod v0.10.0
 ## explicit; go 1.17
 golang.org/x/mod/semver
 # golang.org/x/net v0.9.0
 
@@ -844,7 +846,7 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.1.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.7.0
+# golang.org/x/tools v0.8.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/stringer
 golang.org/x/tools/go/gcexportdata