Bump go-containerregistry dependency (#2076)
This picks up https://github.com/google/go-containerregistry/pull/1360
parent 96a8ee0c07
commit 816e6d52b4

go.mod (12 lines changed)

@@ -23,7 +23,7 @@ require (
 	github.com/godbus/dbus/v5 v5.0.6 // indirect
 	github.com/golang/mock v1.6.0
 	github.com/google/go-cmp v0.5.7
-	github.com/google/go-containerregistry v0.8.1-0.20220214202839-625fe7b4276a
+	github.com/google/go-containerregistry v0.8.1-0.20220507185902-82405e5dfa82
 	github.com/google/go-github v17.0.0+incompatible
 	github.com/google/slowjam v1.0.0
 	github.com/karrick/godirwalk v1.16.1
@@ -57,7 +57,7 @@ require (
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
-	github.com/Microsoft/go-winio v0.5.1 // indirect
+	github.com/Microsoft/go-winio v0.5.2 // indirect
 	github.com/Microsoft/hcsshim v0.9.2 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20220113124808-70ae35bab23f // indirect
 	github.com/acomagu/bufpipe v1.0.3 // indirect
@@ -79,14 +79,14 @@ require (
 	github.com/cilium/ebpf v0.8.0 // indirect
 	github.com/containerd/continuity v0.2.2 // indirect
 	github.com/containerd/fifo v1.0.0 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.10.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.11.1 // indirect
 	github.com/containerd/typeurl v1.0.2 // indirect
 	github.com/coreos/etcd v3.3.27+incompatible // indirect
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/docker/cli v20.10.12+incompatible // indirect
-	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/distribution v2.8.0+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.6.4 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
@@ -113,7 +113,7 @@ require (
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kevinburke/ssh_config v1.1.0 // indirect
-	github.com/klauspost/compress v1.14.2 // indirect
+	github.com/klauspost/compress v1.14.4 // indirect
 	github.com/mattn/go-ieproxy v0.0.2 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
@@ -139,7 +139,7 @@ require (
 	github.com/xanzy/ssh-agent v0.3.1 // indirect
 	go.opencensus.io v0.23.0 // indirect
 	golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce // indirect
-	golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 // indirect
+	golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect

go.sum (28 lines changed)

@@ -193,8 +193,9 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
 github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -438,7 +439,7 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
+github.com/containerd/containerd v1.6.0/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
 github.com/containerd/containerd v1.6.2 h1:pcaPUGbYW8kBw6OgIZwIVIeEhdWVrBzsoCfVJ5BjrLU=
 github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -477,8 +478,8 @@ github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3
 github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116 h1:cj2qTm4k9TlXzzwCROQK0puJc2oauyjUiegQiqpNkuk=
 github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
-github.com/containerd/stargz-snapshotter/estargz v0.10.1 h1:hd1EoVjI2Ax8Cr64tdYqnJ4i4pZU49FkEf5kU8KxQng=
-github.com/containerd/stargz-snapshotter/estargz v0.10.1/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
+github.com/containerd/stargz-snapshotter/estargz v0.11.1 h1:mNQqxcAWmDrV6d6yUvzFhfY8puNzoQz9v4diW+Pmei4=
+github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -571,8 +572,9 @@ github.com/docker/cli v20.10.12+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hH
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.0+incompatible h1:l9EaZDICImO1ngI+uTifW+ZYvvz7fKISBAKpg+MbWbY=
+github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -842,8 +844,8 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8
 github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
 github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.8.1-0.20220214202839-625fe7b4276a h1:dc718J30nnewleBWCCDQXgpWeZWp17cgTmw6mpbF0xM=
-github.com/google/go-containerregistry v0.8.1-0.20220214202839-625fe7b4276a/go.mod h1:cwx3SjrH84Rh9VFJSIhPh43ovyOp3DCWgY3h8nWmdGQ=
+github.com/google/go-containerregistry v0.8.1-0.20220507185902-82405e5dfa82 h1:bi8Rhp5N2udcb+BYH3xCZ86qZpdj/ooMPAIBPxNWQVY=
+github.com/google/go-containerregistry v0.8.1-0.20220507185902-82405e5dfa82/go.mod h1:eTLvLZaEe2FoQsb25t7BLxQQryyrwHTzFfwxN87mhAw=
 github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
@@ -1105,9 +1107,9 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
-github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
+github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1255,7 +1257,6 @@ github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZ
 github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
 github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
 github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
 github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
 github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
@@ -1332,7 +1333,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 h1:+czc/J8SlhPKLOtVLMQc+xDCFBT73ZStMsRhSsUhsSg=
 github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q=
 github.com/opencontainers/runc v1.0.0-rc92 h1:+IczUKCRzDzFDnw99O/PAqrcBBCoRp9xN3cB1SYSNS4=
@@ -1903,7 +1903,6 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127074510-2fabfed7e28f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc=
@@ -2239,7 +2238,7 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
-golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2398,6 +2397,7 @@ google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ6
 google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220301145929-1ac2ace0dbf7/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package winio
@@ -143,6 +144,11 @@ func (f *win32File) Close() error {
 	return nil
 }
 
+// IsClosed checks if the file has been closed
+func (f *win32File) IsClosed() bool {
+	return f.closing.isSet()
+}
+
 // prepareIo prepares for a new IO operation.
 // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
 func (f *win32File) prepareIo() (*ioOperation, error) {

@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package winio
@@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
 	return conn.sock.Close()
 }
 
+func (conn *HvsockConn) IsClosed() bool {
+	return conn.sock.IsClosed()
+}
+
 func (conn *HvsockConn) shutdown(how int) error {
-	err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
+	if conn.IsClosed() {
+		return ErrFileClosed
+	}
+
+	err := syscall.Shutdown(conn.sock.handle, how)
 	if err != nil {
 		return os.NewSyscallError("shutdown", err)
 	}
 	return nil
 }
 
-// CloseRead shuts down the read end of the socket.
+// CloseRead shuts down the read end of the socket, preventing future read operations.
 func (conn *HvsockConn) CloseRead() error {
 	err := conn.shutdown(syscall.SHUT_RD)
 	if err != nil {
@@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
 	return nil
 }
 
-// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
-// no more data will be written.
+// CloseWrite shuts down the write end of the socket, preventing future write operations and
+// notifying the other endpoint that no more data will be written.
 func (conn *HvsockConn) CloseWrite() error {
 	err := conn.shutdown(syscall.SHUT_WR)
 	if err != nil {

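The hunks above are from the vendored go-winio Hyper-V socket (HvsockConn) code: the connection gains an IsClosed accessor, and shutdown now respects the requested direction (previously it always passed syscall.SHUT_RD) and refuses to act on a handle that has already been closed. Below is a minimal, platform-independent sketch of that close-guard pattern, using an atomic flag in place of the real win32File state; conn, isClosed, markClosed and shutdown are illustrative names, not the vendored API.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errFileClosed = errors.New("file has already been closed")

// conn stands in for a socket wrapper; closed is set once Close runs.
type conn struct {
	closed atomic.Bool
}

func (c *conn) isClosed() bool { return c.closed.Load() }

func (c *conn) markClosed() { c.closed.Store(true) }

// shutdown mirrors the guard added in the diff: bail out with a sentinel
// error instead of issuing a syscall on a handle that is already closed.
// The real code calls syscall.Shutdown(handle, how) where the print is.
func (c *conn) shutdown(how int) error {
	if c.isClosed() {
		return errFileClosed
	}
	fmt.Println("shutdown direction:", how)
	return nil
}

func main() {
	c := &conn{}
	fmt.Println(c.shutdown(0)) // prints "shutdown direction: 0", then <nil>
	c.markClosed()
	fmt.Println(c.shutdown(1)) // prints "file has already been closed"
}
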
@@ -14,8 +14,6 @@ import (
 	"encoding/binary"
 	"fmt"
 	"strconv"
-
-	"golang.org/x/sys/windows"
 )
 
 // Variant specifies which GUID variant (or "type") of the GUID. It determines
@@ -41,13 +39,6 @@ type Version uint8
 var _ = (encoding.TextMarshaler)(GUID{})
 var _ = (encoding.TextUnmarshaler)(&GUID{})
 
-// GUID represents a GUID/UUID. It has the same structure as
-// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
-// that type. It is defined as its own type so that stringification and
-// marshaling can be supported. The representation matches that used by native
-// Windows code.
-type GUID windows.GUID
-
 // NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
 func NewV4() (GUID, error) {
 	var b [16]byte
@@ -0,0 +1,15 @@
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
@@ -0,0 +1,10 @@
+package guid
+
+import "golang.org/x/sys/windows"
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID

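In these hunks the GUID definition moves out of the shared guid source and into two new build-constrained files: non-Windows builds get a plain struct with the same memory layout, while Windows builds keep the alias of golang.org/x/sys/windows.GUID, so the package compiles off Windows without importing the windows package. A reduced sketch of the same build-tag split follows; idsketch and ID are hypothetical names, and the sibling Windows-only file would simply carry the opposite constraint.

//go:build !windows
// +build !windows

// Package idsketch shows the shape of a build-constrained definition:
// this file only takes part in non-Windows builds, while a sibling file
// tagged "windows" would supply the platform-specific definition.
package idsketch

// ID mirrors a fixed, Windows-GUID-like layout so that both definitions
// stay byte-compatible across platforms.
type ID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
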
@@ -3,11 +3,10 @@
 package security
 
 import (
+	"fmt"
 	"os"
 	"syscall"
 	"unsafe"
-
-	"github.com/pkg/errors"
 )
 
 type (
@@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
 	// Stat (to determine if `name` is a directory).
 	s, err := os.Stat(name)
 	if err != nil {
-		return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
+		return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
 	}
 
 	// Get a handle to the file/directory. Must defer Close on success.
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
 	sd := uintptr(0)
 	origDACL := uintptr(0)
 	if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
-		return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
+		return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
 	}
 	defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))
 
@@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {
 
 	// And finally use SetSecurityInfo to apply the updated DACL.
 	if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
-		return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
+		return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
 	}
 
 	return nil
@@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
 	}
 	fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
 	if err != nil {
-		return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
+		return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
 	}
 	return fd, nil
 }
@@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
 	// Generate pointers to the SIDs based on the string SIDs
 	sid, err := syscall.StringToSid(sidVmGroup)
 	if err != nil {
-		return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
+		return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
 	}
 
 	inheritance := inheritModeNoInheritance
@@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
 
 	modifiedDACL := uintptr(0)
 	if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
-		return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
+		return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
 	}
 
 	return modifiedDACL, nil

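Every error-handling hunk in the vendored code follows the same pattern: errors.Wrap/errors.Wrapf from github.com/pkg/errors is replaced by fmt.Errorf with the %w verb, which drops the external dependency while keeping the cause inspectable through the standard library. Here is a small self-contained illustration of the equivalence from the caller's point of view; grantAccess is a made-up stand-in for a helper such as GrantVmGroupAccess, not the vendored function.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// grantAccess wraps the underlying failure with %w instead of errors.Wrapf,
// mirroring the style the diff switches to.
func grantAccess(name string) error {
	if _, err := os.Stat(name); err != nil {
		return fmt.Errorf("grantAccess os.Stat %s: %w", name, err)
	}
	return nil
}

func main() {
	err := grantAccess("/definitely/missing/path")
	fmt.Println(err)
	// errors.Is still sees the wrapped cause, just as it did with pkg/errors.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true
}
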
@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package vhd
@@ -7,14 +8,13 @@ import (
 	"syscall"
 
 	"github.com/Microsoft/go-winio/pkg/guid"
-	"github.com/pkg/errors"
 	"golang.org/x/sys/windows"
 )
 
 //go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go
 
 //sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
-//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
+//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
 //sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
 //sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
 //sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
@@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
 	Version2 OpenVersion2
 }
 
+// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
+// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
+// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
+type openVersion2 struct {
+	getInfoOnly    int32
+	readOnly       int32
+	resiliencyGUID guid.GUID
+}
+
+type openVirtualDiskParameters struct {
+	version  uint32
+	version2 openVersion2
+}
+
 type AttachVersion2 struct {
 	RestrictedOffset uint64
 	RestrictedLength uint64
 }
 
 type AttachVirtualDiskParameters struct {
-	Version uint32 // Must always be set to 2
+	Version  uint32
 	Version2 AttachVersion2
 }
 
@@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
 		return err
 	}
 
-	if err := syscall.CloseHandle(handle); err != nil {
-		return err
-	}
-	return nil
+	return syscall.CloseHandle(handle)
 }
 
 // DetachVirtualDisk detaches a virtual hard disk by handle.
 func DetachVirtualDisk(handle syscall.Handle) (err error) {
 	if err := detachVirtualDisk(handle, 0, 0); err != nil {
-		return errors.Wrap(err, "failed to detach virtual disk")
+		return fmt.Errorf("failed to detach virtual disk: %w", err)
 	}
 	return nil
 }
@@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
 		parameters,
 		nil,
 	); err != nil {
-		return errors.Wrap(err, "failed to attach virtual disk")
+		return fmt.Errorf("failed to attach virtual disk: %w", err)
 	}
 	return nil
 }
@@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
 		AttachVirtualDiskFlagNone,
 		&params,
 	); err != nil {
-		return errors.Wrap(err, "failed to attach virtual disk")
+		return fmt.Errorf("failed to attach virtual disk: %w", err)
 	}
 	return nil
 }
@@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
 	var (
 		handle      syscall.Handle
 		defaultType VirtualStorageType
+		getInfoOnly int32
+		readOnly    int32
 	)
 	if parameters.Version != 2 {
 		return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
 	}
+	if parameters.Version2.GetInfoOnly {
+		getInfoOnly = 1
+	}
+	if parameters.Version2.ReadOnly {
+		readOnly = 1
+	}
+	params := &openVirtualDiskParameters{
+		version: parameters.Version,
+		version2: openVersion2{
+			getInfoOnly,
+			readOnly,
+			parameters.Version2.ResiliencyGUID,
+		},
+	}
 	if err := openVirtualDisk(
 		&defaultType,
 		vhdPath,
 		uint32(virtualDiskAccessMask),
 		uint32(openVirtualDiskFlags),
-		parameters,
+		params,
 		&handle,
 	); err != nil {
-		return 0, errors.Wrap(err, "failed to open virtual disk")
+		return 0, fmt.Errorf("failed to open virtual disk: %w", err)
 	}
 	return handle, nil
 }
@@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
 		nil,
 		&handle,
 	); err != nil {
-		return handle, errors.Wrap(err, "failed to create virtual disk")
+		return handle, fmt.Errorf("failed to create virtual disk: %w", err)
 	}
 	return handle, nil
 }
@@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
 		&diskPathSizeInBytes,
 		&diskPhysicalPathBuf[0],
 	); err != nil {
-		return "", errors.Wrap(err, "failed to get disk physical path")
+		return "", fmt.Errorf("failed to get disk physical path: %w", err)
 	}
 	return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
 }
@@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
 		createParams,
 	)
 	if err != nil {
-		return fmt.Errorf("failed to create differencing vhd: %s", err)
+		return fmt.Errorf("failed to create differencing vhd: %w", err)
 	}
 	if err := syscall.CloseHandle(vhdHandle); err != nil {
-		return fmt.Errorf("failed to close differencing vhd handle: %s", err)
+		return fmt.Errorf("failed to close differencing vhd handle: %w", err)
 	}
 	return nil
 }

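The comment introduced above explains the new unexported structs: the exported OpenVersion2 uses Go bools for GetInfoOnly and ReadOnly, while the underlying Win32 structure uses BOOL (int32) fields, so the open path now copies the public struct into a private, ABI-shaped one before the syscall. Below is a platform-independent sketch of that translation step; PublicParams, abiParams and boolToBOOL are illustrative names, not the vendored ones.

package main

import "fmt"

// PublicParams is the caller-facing shape, with idiomatic Go bools.
type PublicParams struct {
	GetInfoOnly bool
	ReadOnly    bool
}

// abiParams mirrors a Win32-style layout where BOOL is a 32-bit integer.
type abiParams struct {
	getInfoOnly int32
	readOnly    int32
}

// boolToBOOL converts a Go bool to the 0/1 encoding Win32 expects.
func boolToBOOL(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

// toABI builds the internal struct that would be handed to the syscall.
func toABI(p PublicParams) abiParams {
	return abiParams{
		getInfoOnly: boolToBOOL(p.GetInfoOnly),
		readOnly:    boolToBOOL(p.ReadOnly),
	}
}

func main() {
	fmt.Printf("%+v\n", toABI(PublicParams{GetInfoOnly: true})) // {getInfoOnly:1 readOnly:0}
}
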
@@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint
 	return
 }
 
-func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
+func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
 	var _p0 *uint16
 	_p0, win32err = syscall.UTF16PtrFromString(path)
 	if win32err != nil {
@@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
 	return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
 }
 
-func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
+func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
 	r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
 	if r0 != 0 {
 		win32err = syscall.Errno(r0)

@@ -26,6 +26,7 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -38,7 +39,6 @@ import (
 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	"github.com/klauspost/compress/zstd"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -142,7 +142,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 	defer func() {
 		if rErr != nil {
 			if err := layerFiles.CleanupAll(); err != nil {
-				rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
+				rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
 			}
 		}
 	}()
@@ -307,7 +307,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
 	// Import tar file.
 	intar, err := importTar(in)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to sort")
+		return nil, fmt.Errorf("failed to sort: %w", err)
 	}
 
 	// Sort the tar file respecting to the prioritized files list.
@@ -318,7 +318,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
 				*missedPrioritized = append(*missedPrioritized, l)
 				continue // allow not found
 			}
-			return nil, errors.Wrap(err, "failed to sort tar entries")
+			return nil, fmt.Errorf("failed to sort tar entries: %w", err)
 		}
 	}
 	if len(prioritized) == 0 {
@@ -371,7 +371,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
 	tf := &tarFile{}
 	pw, err := newCountReader(in)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to make position watcher")
+		return nil, fmt.Errorf("failed to make position watcher: %w", err)
 	}
 	tr := tar.NewReader(pw)
 
@@ -383,7 +383,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
 			if err == io.EOF {
 				break
 			} else {
-				return nil, errors.Wrap(err, "failed to parse tar file")
+				return nil, fmt.Errorf("failed to parse tar file, %w", err)
 			}
 		}
 		switch cleanEntryName(h.Name) {
@@ -420,7 +420,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error {
 	_, okIn := in.get(name)
 	_, okOut := out.get(name)
 	if !okIn && !okOut {
-		return errors.Wrapf(errNotFound, "file: %q", name)
+		return fmt.Errorf("file: %q: %w", name, errNotFound)
 	}
 
 	parent, _ := path.Split(strings.TrimSuffix(name, "/"))

@@ -27,6 +27,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"crypto/sha256"
+	"errors"
 	"fmt"
 	"hash"
 	"io"
@@ -40,7 +41,6 @@ import (
 
 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 	"github.com/vbatts/tar-split/archive/tar"
 )
 
@@ -107,7 +107,7 @@ type Telemetry struct {
 }
 
 // Open opens a stargz file for reading.
-// The behaviour is configurable using options.
+// The behavior is configurable using options.
 //
 // Note that each entry name is normalized as the path that is relative to root.
 func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
@@ -385,8 +385,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
 		if e.Digest != "" {
 			d, err := digest.Parse(e.Digest)
 			if err != nil {
-				return nil, errors.Wrapf(err,
-					"failed to parse regular file digest %q", e.Digest)
+				return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
 			}
 			regDigestMap[e.Offset] = d
 		} else {
@@ -401,8 +400,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
 		if e.ChunkDigest != "" {
 			d, err := digest.Parse(e.ChunkDigest)
 			if err != nil {
-				return nil, errors.Wrapf(err,
-					"failed to parse chunk digest %q", e.ChunkDigest)
+				return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
 			}
 			chunkDigestMap[e.Offset] = d
 		} else {
@@ -647,7 +645,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
 	}
 	blobPayloadSize, _, _, err := c.ParseFooter(footer)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to parse footer")
+		return nil, fmt.Errorf("failed to parse footer: %w", err)
 	}
 	return c.Reader(io.LimitReader(sr, blobPayloadSize))
 }

@@ -34,7 +34,6 @@ import (
 	"strconv"
 
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 type gzipCompression struct {
@@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t
 	}
 	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
 	if err != nil {
-		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
 	}
 	return tocOffset, tocOffset, 0, nil
 }
@@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
 	}
 	zr, err := gzip.NewReader(bytes.NewReader(p))
 	if err != nil {
-		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
 	}
 	defer zr.Close()
 	extra := zr.Header.Extra
@@ -191,7 +190,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
 	}
 	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
 	if err != nil {
-		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
 	}
 	return tocOffset, tocOffset, 0, nil
 }

@@ -28,6 +28,7 @@ import (
 	"compress/gzip"
 	"crypto/sha256"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -41,7 +42,6 @@ import (
 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	"github.com/klauspost/compress/zstd"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )

 // TestingController is Compression with some helper methods necessary for testing.
@@ -1062,18 +1062,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
 	fSize := controller.FooterSize()
 	footer := make([]byte, fSize)
 	if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
-		return nil, 0, errors.Wrap(err, "error reading footer")
+		return nil, 0, fmt.Errorf("error reading footer: %w", err)
 	}
 	_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
 	if err != nil {
-		return nil, 0, errors.Wrapf(err, "failed to parse footer")
+		return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
 	}

 	// Decode the TOC JSON
 	tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
 	decodedJTOC, _, err = controller.ParseTOC(tocReader)
 	if err != nil {
-		return nil, 0, errors.Wrap(err, "failed to parse TOC")
+		return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
 	}
 	return decodedJTOC, tocOffset, nil
 }
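The estargz hunks above swap github.com/pkg/errors wrapping for standard-library error wrapping with %w. A minimal sketch of the pattern (the parse helper here is hypothetical, not from the repo), showing that the cause stays inspectable with errors.Is:

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// parseTOCOffset mirrors the pattern above: wrap with %w so callers can still
// inspect the underlying cause with errors.Is / errors.As.
func parseTOCOffset(field string) (int64, error) {
	off, err := strconv.ParseInt(field, 16, 64)
	if err != nil {
		// Before: errors.Wrapf(err, "failed to parse toc offset")
		// After:  fmt.Errorf("...: %w", err) keeps the chain unwrappable.
		return 0, fmt.Errorf("failed to parse toc offset: %w", err)
	}
	return off, nil
}

func main() {
	_, err := parseTOCOffset("zz")
	fmt.Println(errors.Is(err, strconv.ErrSyntax)) // true: the cause is preserved
}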
@@ -0,0 +1,20 @@
+linters:
+  enable:
+    - structcheck
+    - varcheck
+    - staticcheck
+    - unconvert
+    - gofmt
+    - goimports
+    - golint
+    - ineffassign
+    - vet
+    - unused
+    - misspell
+  disable:
+    - errcheck
+
+run:
+  deadline: 2m
+  skip-dirs:
+    - vendor
@@ -1,16 +0,0 @@
-{
-  "Vendor": true,
-  "Deadline": "2m",
-  "Sort": ["linter", "severity", "path", "line"],
-  "EnableGC": true,
-  "Enable": [
-    "structcheck",
-    "staticcheck",
-    "unconvert",
-
-    "gofmt",
-    "goimports",
-    "golint",
-    "vet"
-  ]
-}
@@ -30,3 +30,17 @@ Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
 Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
 Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
 Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>
+Milos Gajdos <milosgajdos83@gmail.com> Milos Gajdos <milosgajdos@users.noreply.github.com>
+Derek McGowan <derek@mcgstyle.net> Derek McGowa <dmcgowan@users.noreply.github.com>
+Adrian Plata <adrian.plata@docker.com> Adrian Plata <@users.noreply.github.com>
+Sebastiaan van Stijn <github@gone.nl> Sebastiaan van Stijn <thaJeztah@users.noreply.github.com>
+Vishesh Jindal <vishesh92@gmail.com> Vishesh Jindal <vishesh92@users.noreply.github.com>
+Wang Yan <wangyan@vmware.com> Wang Yan <wy65701436@users.noreply.github.com>
+Chris Patterson <chrispat@github.com> Chris Patterson <chrispat@users.noreply.github.com>
+Eohyung Lee <liquidnuker@gmail.com> Eohyung Lee <leoh0@users.noreply.github.com>
+João Pereira <484633+joaodrp@users.noreply.github.com>
+Smasherr <soundcracker@gmail.com> Smasherr <Smasherr@users.noreply.github.com>
+Thomas Berger <loki@lokis-chaos.de> Thomas Berger <tbe@users.noreply.github.com>
+Samuel Karp <skarp@amazon.com> Samuel Karp <samuelkarp@users.noreply.github.com>
+Justin Cormack <justin.cormack@docker.com>
+sayboras <sayboras@yahoo.com>
@@ -1,51 +0,0 @@
-dist: trusty
-sudo: required
-# setup travis so that we can run containers for integration tests
-services:
-  - docker
-
-language: go
-
-go:
-  - "1.11.x"
-
-go_import_path: github.com/docker/distribution
-
-addons:
-  apt:
-    packages:
-      - python-minimal
-
-
-env:
-  - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
-
-before_install:
-  - uname -r
-  - sudo apt-get -q update
-
-install:
-  - go get -u github.com/vbatts/git-validation
-  # TODO: Add enforcement of license
-  # - go get -u github.com/kunalkushwaha/ltag
-  - cd $TRAVIS_BUILD_DIR
-
-script:
-  - export GOOS=$TRAVIS_GOOS
-  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
-  - DCO_VERBOSITY=-q script/validate/dco
-  - GOOS=linux script/setup/install-dev-tools
-  - script/validate/vendor
-  - go build -i .
-  - make check
-  - make build
-  - make binaries
-  # Currently takes too long
-  #- if [ "$GOOS" = "linux" ]; then make test-race ; fi
-  - if [ "$GOOS" = "linux" ]; then make coverage ; fi
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash) -F linux
-
-before_deploy:
-  # Run tests with storage driver configurations
@@ -1,22 +1,45 @@
-FROM golang:1.11-alpine AS build
+# syntax=docker/dockerfile:1.3

-ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
-ENV BUILDTAGS include_oss include_gcs
+ARG GO_VERSION=1.16
+ARG GORELEASER_XX_VERSION=1.2.5

-ARG GOOS=linux
-ARG GOARCH=amd64
-ARG GOARM=6
+FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base
+COPY --from=goreleaser-xx / /
+RUN apk add --no-cache file git
+WORKDIR /go/src/github.com/docker/distribution

-RUN set -ex \
-    && apk add --no-cache make git file
+FROM base AS build
+ENV GO111MODULE=auto
+ENV CGO_ENABLED=0
+ARG TARGETPLATFORM
+ARG PKG="github.com/distribution/distribution"
+ARG BUILDTAGS="include_oss include_gcs"
+RUN --mount=type=bind,rw \
+  --mount=type=cache,target=/root/.cache/go-build \
+  --mount=target=/go/pkg/mod,type=cache \
+  goreleaser-xx --debug \
+    --name="registry" \
+    --dist="/out" \
+    --main="./cmd/registry" \
+    --flags="-v" \
+    --ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \
+    --tags="$BUILDTAGS" \
+    --files="LICENSE" \
+    --files="README.md"

-WORKDIR $DISTRIBUTION_DIR
-COPY . $DISTRIBUTION_DIR
-RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked"
+FROM scratch AS artifacts
+COPY --from=build /out/*.tar.gz /
+COPY --from=build /out/*.zip /
+COPY --from=build /out/*.sha256 /

-FROM alpine
+FROM scratch AS binary
+COPY --from=build /usr/local/bin/registry* /
+
+FROM alpine:3.14
+RUN apk add --no-cache ca-certificates
 COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
-COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry
+COPY --from=build /usr/local/bin/registry /bin/registry
 VOLUME ["/var/lib/registry"]
 EXPOSE 5000
 ENTRYPOINT ["registry"]
@@ -50,7 +50,7 @@ version/version.go:

 check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
 	@echo "$(WHALE) $@"
-	gometalinter --config .gometalinter.json ./...
+	golangci-lint run

 test: ## run tests, except integration test with test.short
 	@echo "$(WHALE) $@"
@@ -10,7 +10,7 @@ import (

 	"github.com/docker/distribution/reference"
 	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/image-spec/specs-go/v1"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

 var (
@@ -0,0 +1,51 @@
+group "default" {
+  targets = ["image-local"]
+}
+
+// Special target: https://github.com/docker/metadata-action#bake-definition
+target "docker-metadata-action" {
+  tags = ["registry:local"]
+}
+
+target "binary" {
+  target = "binary"
+  output = ["./bin"]
+}
+
+target "artifact" {
+  target = "artifacts"
+  output = ["./bin"]
+}
+
+target "artifact-all" {
+  inherits = ["artifact"]
+  platforms = [
+    "linux/amd64",
+    "linux/arm/v6",
+    "linux/arm/v7",
+    "linux/arm64",
+    "linux/ppc64le",
+    "linux/s390x"
+  ]
+}
+
+target "image" {
+  inherits = ["docker-metadata-action"]
+}
+
+target "image-local" {
+  inherits = ["image"]
+  output = ["type=docker"]
+}
+
+target "image-all" {
+  inherits = ["image"]
+  platforms = [
+    "linux/amd64",
+    "linux/arm/v6",
+    "linux/arm/v7",
+    "linux/arm64",
+    "linux/ppc64le",
+    "linux/s390x"
+  ]
+}
@@ -87,7 +87,7 @@ func ManifestMediaTypes() (mediaTypes []string) {
 // UnmarshalFunc implements manifest unmarshalling a given MediaType
 type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)

-var mappings = make(map[string]UnmarshalFunc, 0)
+var mappings = make(map[string]UnmarshalFunc)

 // UnmarshalManifest looks up manifest unmarshal functions based on
 // MediaType
@@ -56,6 +56,35 @@ func ParseNormalizedNamed(s string) (Named, error) {
 	return named, nil
 }

+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. For reference contains both tag
+// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
 // splitDockerDomain splits a repository name to domain and remotename string.
 // If no valid domain is found, the default domain is used. Repository name
 // needs to be already validated before.
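A small usage sketch of the new ParseDockerRef helper. The import path is assumed to be github.com/docker/distribution/reference, and the digest is the one quoted in the doc comment above:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Short name: the default domain and the "latest" tag come from
	// ParseNormalizedNamed plus TagNameOnly.
	ref, err := reference.ParseDockerRef("busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // expected: docker.io/library/busybox:latest

	// Tag and digest together: the tag is dropped and only the digested form survives.
	ref, err = reference.ParseDockerRef("docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String()) // expected: docker.io/library/busybox@sha256:7cc4...
}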
@@ -205,7 +205,7 @@ func Parse(s string) (Reference, error) {
 	var repo repository

 	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
-	if nameMatch != nil && len(nameMatch) == 3 {
+	if len(nameMatch) == 3 {
 		repo.domain = nameMatch[1]
 		repo.path = nameMatch[2]
 	} else {
@@ -207,11 +207,11 @@ func (errs Errors) MarshalJSON() ([]byte, error) {
 	for _, daErr := range errs {
 		var err Error

-		switch daErr.(type) {
+		switch daErr := daErr.(type) {
 		case ErrorCode:
-			err = daErr.(ErrorCode).WithDetail(nil)
+			err = daErr.WithDetail(nil)
 		case Error:
-			err = daErr.(Error)
+			err = daErr
 		default:
 			err = ErrorCodeUnknown.WithDetail(daErr)

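The rewrite above uses the bound form of a type switch, which removes the repeated assertions inside each case. A generic illustration of the idiom (the types here are placeholders, not from the registry code):

package main

import "fmt"

// classify shows the `switch v := x.(type)` binding: inside each case, v
// already has that case's concrete type, so no second assertion is needed.
func classify(x interface{}) string {
	switch v := x.(type) {
	case error:
		return "error: " + v.Error() // v is already typed as error here
	case fmt.Stringer:
		return "stringer: " + v.String()
	default:
		return fmt.Sprintf("other: %v", v)
	}
}

func main() { fmt.Println(classify(fmt.Errorf("boom"))) } // error: boom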
@@ -117,8 +117,8 @@ func init() {
 		var t octetType
 		isCtl := c <= 31 || c == 127
 		isChar := 0 <= c && c <= 127
-		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
-		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
 			t |= isSpace
 		}
 		if isChar && !isCtl && !isSeparator {
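strings.ContainsRune is the direct replacement for the IndexRune >= 0 comparison; both calls below are standard-library functions and the behavior is identical:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const separators = " \t\"(),/:;<=>?@[]\\{}"
	c := byte('@')
	// Equivalent checks; ContainsRune states the intent without the explicit comparison.
	before := strings.IndexRune(separators, rune(c)) >= 0
	after := strings.ContainsRune(separators, rune(c))
	fmt.Println(before, after) // true true
}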
@@ -8,7 +8,7 @@ github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
 github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
 github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
 github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
-github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
+github.com/dgrijalva/jwt-go 4bbdd8ac624fc7a9ef7aec841c43d99b5fe65a29 https://github.com/golang-jwt/jwt.git # v3.2.2
 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
 github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
 github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
@@ -48,4 +48,4 @@ gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
 gopkg.in/yaml.v2 v2.2.1
 rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
 github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
-github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
+github.com/opencontainers/image-spec 67d2d5658fe0476ab9bf414cec164077ebff3920 # v1.0.2
@@ -14,6 +14,19 @@

 package authn

+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Authenticator is used to authenticate Docker transports.
+type Authenticator interface {
+	// Authorization returns the value to use in an http transport's Authorization header.
+	Authorization() (*AuthConfig, error)
+}
+
 // AuthConfig contains authorization information for connecting to a Registry
 // Inlined what we use from github.com/docker/cli/cli/config/types
 type AuthConfig struct {
@@ -29,8 +42,74 @@ type AuthConfig struct {
 	RegistryToken string `json:"registrytoken,omitempty"`
 }

-// Authenticator is used to authenticate Docker transports.
-type Authenticator interface {
-	// Authorization returns the value to use in an http transport's Authorization header.
-	Authorization() (*AuthConfig, error)
+// This is effectively a copy of the type AuthConfig. This simplifies
+// JSON unmarshalling since AuthConfig methods are not inherited
+type authConfig AuthConfig
+
+// UnmarshalJSON implements json.Unmarshaler
+func (a *AuthConfig) UnmarshalJSON(data []byte) error {
+	var shadow authConfig
+	err := json.Unmarshal(data, &shadow)
+	if err != nil {
+		return err
+	}
+
+	*a = (AuthConfig)(shadow)
+
+	if len(shadow.Auth) != 0 {
+		var derr error
+		a.Username, a.Password, derr = decodeDockerConfigFieldAuth(shadow.Auth)
+		if derr != nil {
+			err = fmt.Errorf("unable to decode auth field: %w", derr)
+		}
+	} else if len(a.Username) != 0 && len(a.Password) != 0 {
+		a.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	}
+
+	return err
+}
+
+// MarshalJSON implements json.Marshaler
+func (a AuthConfig) MarshalJSON() ([]byte, error) {
+	shadow := (authConfig)(a)
+	shadow.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	return json.Marshal(shadow)
+}
+
+// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
+// username and a password. The format of the auth field is base64(<username>:<password>).
+//
+// From https://github.com/kubernetes/kubernetes/blob/75e49ec824b183288e1dbaccfd7dbe77d89db381/pkg/credentialprovider/config.go
+// Copyright 2014 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+func decodeDockerConfigFieldAuth(field string) (username, password string, err error) {
+	var decoded []byte
+	// StdEncoding can only decode padded string
+	// RawStdEncoding can only decode unpadded string
+	if strings.HasSuffix(strings.TrimSpace(field), "=") {
+		// decode padded data
+		decoded, err = base64.StdEncoding.DecodeString(field)
+	} else {
+		// decode unpadded data
+		decoded, err = base64.RawStdEncoding.DecodeString(field)
+	}
+
+	if err != nil {
+		return
+	}
+
+	parts := strings.SplitN(string(decoded), ":", 2)
+	if len(parts) != 2 {
+		err = fmt.Errorf("must be formatted as base64(username:password)")
+		return
+	}
+
+	username = parts[0]
+	password = parts[1]
+
+	return
+}
+
+func encodeDockerConfigFieldAuth(username, password string) string {
+	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
 }
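With the custom (un)marshalling above, the legacy base64 "auth" field and the username/password pair stay in sync in both directions. A quick sketch against the authn package (the credential values are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
)

func main() {
	// Marshalling fills in the "auth" field as base64("user:pass").
	out, _ := json.Marshal(authn.AuthConfig{Username: "user", Password: "pass"})
	fmt.Println(string(out))

	// Unmarshalling a config that only carries "auth" recovers Username/Password.
	var cfg authn.AuthConfig
	_ = json.Unmarshal([]byte(`{"auth":"dXNlcjpwYXNz"}`), &cfg)
	fmt.Println(cfg.Username, cfg.Password) // user pass
}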
@@ -34,6 +34,7 @@ type randomIndex struct {
 func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
 	manifest := v1.IndexManifest{
 		SchemaVersion: 2,
+		MediaType:     types.OCIImageIndex,
 		Manifests:     []v1.Descriptor{},
 	}

@@ -73,7 +74,7 @@ func Index(byteSize, layers, count int64) (v1.ImageIndex, error) {
 }

 func (i *randomIndex) MediaType() (types.MediaType, error) {
-	return types.OCIImageIndex, nil
+	return i.manifest.MediaType, nil
 }

 func (i *randomIndex) Digest() (v1.Hash, error) {
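After this change a random index reports the media type stored in its manifest instead of a hard-coded constant. A small check, assuming the usual package path for the random helpers:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/v1/random"
)

func main() {
	idx, err := random.Index(1024, 1, 1) // byteSize, layers, count
	if err != nil {
		panic(err)
	}
	mt, err := idx.MediaType()
	if err != nil {
		panic(err)
	}
	fmt.Println(mt) // application/vnd.oci.image.index.v1+json
}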
@@ -102,7 +102,6 @@ var DefaultTransport = &http.Transport{

 func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
 	o := &options{
-		auth:      authn.Anonymous,
 		transport: DefaultTransport,
 		platform:  defaultPlatform,
 		context:   context.Background(),
@@ -118,12 +117,19 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
 		}
 	}

-	if o.keychain != nil {
+	switch {
+	case o.auth != nil && o.keychain != nil:
+		// It is a better experience to explicitly tell a caller their auth is misconfigured
+		// than potentially fail silently when the correct auth is overridden by option misuse.
+		return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both")
+	case o.keychain != nil:
 		auth, err := o.keychain.Resolve(target)
 		if err != nil {
 			return nil, err
 		}
 		o.auth = auth
+	case o.auth == nil:
+		o.auth = authn.Anonymous
 	}

 	// transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping.
@@ -163,6 +169,7 @@ func WithTransport(t http.RoundTripper) Option {

 // WithAuth is a functional option for overriding the default authenticator
 // for remote operations.
+// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set.
 //
 // The default authenticator is authn.Anonymous.
 func WithAuth(auth authn.Authenticator) Option {
@@ -175,6 +182,7 @@ func WithAuth(auth authn.Authenticator) Option {
 // WithAuthFromKeychain is a functional option for overriding the default
 // authenticator for remote operations, using an authn.Keychain to find
 // credentials.
+// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set.
 //
 // The default authenticator is authn.Anonymous.
 func WithAuthFromKeychain(keys authn.Keychain) Option {
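The new switch in makeOptions turns what used to be a silent override into an explicit error. A sketch of the failure mode; the reference string and registry are placeholders, and the error is returned before any network call is made:

package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("example.com/repo/image:tag")
	if err != nil {
		panic(err)
	}
	// Passing both an Authenticator and a Keychain is now rejected up front
	// instead of one of them silently winning.
	_, err = remote.Get(ref,
		remote.WithAuth(&authn.Basic{Username: "user", Password: "pass"}),
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
	)
	fmt.Println(err) // error: provide an option for either authn.Authenticator or authn.Keychain, not both
}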
22
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
@@ -87,26 +87,24 @@ func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {

 	// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
 	if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
+		newScopes := []string{}
 		for _, wac := range challenges {
 			// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
-			if scope, ok := wac.Parameters["scope"]; ok {
-				// From https://tools.ietf.org/html/rfc6750#section-3
-				// The "scope" attribute is defined in Section 3.3 of [RFC6749]. The
-				// "scope" attribute is a space-delimited list of case-sensitive scope
-				// values indicating the required scope of the access token for
-				// accessing the requested resource.
-				scopes := strings.Split(scope, " ")
+			if want, ok := wac.Parameters["scope"]; ok {

 				// Add any scopes that we don't already request.
 				got := stringSet(bt.scopes)
-				for _, want := range scopes {
-					if _, ok := got[want]; !ok {
-						bt.scopes = append(bt.scopes, want)
-					}
+				if _, ok := got[want]; !ok {
+					newScopes = append(newScopes, want)
 				}
 			}
 		}

+		// Some registries seem to only look at the first scope parameter during a token exchange.
+		// If a request fails because it's missing a scope, we should put those at the beginning,
+		// otherwise the registry might just ignore it :/
+		newScopes = append(newScopes, bt.scopes...)
+		bt.scopes = newScopes
+
 		// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.

 		// Retry the request to attempt to get a valid token.
@@ -235,7 +233,9 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) {

 	v := url.Values{}
 	v.Set("scope", strings.Join(bt.scopes, " "))
+	if bt.service != "" {
 		v.Set("service", bt.service)
+	}
 	v.Set("client_id", defaultUserAgent)
 	if auth.IdentityToken != "" {
 		v.Set("grant_type", "refresh_token")
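The RoundTrip change above collects the scopes requested by the challenge first and only then appends the scopes it already had, so the scope the registry just asked for ends up at the front of the list. The slice pattern in isolation, with made-up scope strings:

package main

import "fmt"

func main() {
	existing := []string{"repository:old/repo:pull"}
	challenged := []string{"repository:new/repo:pull,push"}

	// Prepend the scopes from the WWW-Authenticate challenge: some registries
	// only honor the first scope parameter in a token exchange.
	newScopes := append([]string{}, challenged...)
	newScopes = append(newScopes, existing...)
	fmt.Println(newScopes) // [repository:new/repo:pull,push repository:old/repo:pull]
}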
7
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
@@ -76,12 +76,7 @@ func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authentic
 	if !ok {
 		return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
 	}
-	service, ok := pr.parameters["service"]
-	if !ok {
-		// If the service parameter is not specified, then default it to the registry
-		// with which we are talking.
-		service = reg.String()
-	}
+	service := pr.parameters["service"]
 	bt := &bearerTransport{
 		inner: t,
 		basic: auth,
@@ -410,6 +410,7 @@ func (w *writer) incrProgress(written int64) {

 // uploadOne performs a complete upload of a single layer.
 func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
+	tryUpload := func() error {
 		var from, mount string
 		if h, err := l.Digest(); err == nil {
 			// If we know the digest, this isn't a streaming layer. Do an existence
@@ -436,7 +437,6 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
 			}
 		}

-	tryUpload := func() error {
 		location, mounted, err := w.initiateUpload(from, mount)
 		if err != nil {
 			return err
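Moving the tryUpload closure above the digest/mount lookup means every retry re-runs the whole body, including the existence check, rather than reusing stale state. A stripped-down sketch of the shape; the retry helper here is hypothetical, not the library's:

package main

import (
	"errors"
	"fmt"
)

// retry re-invokes f until it succeeds or attempts run out; uploadOne has the
// same shape, so a transient failure redoes the whole upload, not just part of it.
func retry(attempts int, f func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = f(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	calls := 0
	err := retry(3, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}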
@@ -17,6 +17,19 @@ This package provides various compression algorithms.

 # changelog

+* Feb 17, 2022 (v1.14.3)
+	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+	* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+	* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+	* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+	* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+	* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+	* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+	* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+	* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
 * Jan 11, 2022 (v1.14.1)
 	* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
 	* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
@@ -53,6 +66,9 @@ This package provides various compression algorithms.
 	* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
 	* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)

+<details>
+	<summary>See changes to v1.12.x</summary>
+
 * May 25, 2021 (v1.12.3)
 	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
 	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +90,10 @@ This package provides various compression algorithms.
 	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
 	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
 	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>

 <details>
-	<summary>See changes prior to v1.12.1</summary>
+	<summary>See changes to v1.11.x</summary>

 * Mar 26, 2021 (v1.11.13)
 	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +152,7 @@ This package provides various compression algorithms.
 </details>

 <details>
-	<summary>See changes prior to v1.11.0</summary>
+	<summary>See changes to v1.10.x</summary>

 * July 8, 2020 (v1.10.11)
 	* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +314,6 @@ This package provides various compression algorithms.

 # deflate usage

-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:

 | old import | new import | Documentation
@@ -323,6 +335,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
 If you expect to have a lot of concurrently allocated Writers consider using
 the stateless compress described below.

+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
 # Stateless compression

 This package offers stateless compression as a special option for gzip/deflate.
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"sync"

 	"github.com/klauspost/compress/fse"
 )
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
 	return &Decoder{
 		dt:             s.dt,
 		actualTableLog: s.actualTableLog,
+		bufs:           &s.decPool,
 	}
 }

@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
 type Decoder struct {
 	dt             dTable
 	actualTableLog uint8
+	bufs           *sync.Pool
+}
+
+func (d *Decoder) buffer() *[4][256]byte {
+	buf, ok := d.bufs.Get().(*[4][256]byte)
+	if ok {
+		return buf
+	}
+	return &[4][256]byte{}
 }

 // Decompress1X will decompress a 1X encoded stream.
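The new bufs field reuses the per-call [4][256]byte scratch buffers through a sync.Pool instead of allocating them on every decompress call. The pattern on its own, with illustrative names rather than the library's:

package main

import (
	"fmt"
	"sync"
)

var bufPool sync.Pool // holds *[4][256]byte values

// getBuffer mirrors Decoder.buffer above: reuse a pooled buffer when one is
// available, otherwise allocate a fresh one.
func getBuffer() *[4][256]byte {
	if buf, ok := bufPool.Get().(*[4][256]byte); ok {
		return buf
	}
	return &[4][256]byte{}
}

func main() {
	buf := getBuffer()
	buf[0][0] = 42
	// Return the buffer on every exit path, which is what the d.bufs.Put
	// calls threaded through the hunks below do.
	bufPool.Put(buf)
	fmt.Println(len(buf)) // 4 rows of 256 bytes each
}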
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8

 	for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 		if off == 0 {
 			if len(dst)+256 > maxDecodedSize {
 				br.close()
+				d.bufs.Put(bufs)
 				return nil, ErrMaxDecodedSizeExceeded
 			}
 			dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 	}

 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 			}
 		}
 		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
 			br.close()
 			return nil, ErrMaxDecodedSizeExceeded
 		}
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
 		bitsLeft -= nBits
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }

@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:256]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8

 	switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
 					br.close()
+					d.bufs.Put(bufs)
 					return nil, ErrMaxDecodedSizeExceeded
 				}
 				dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
 					br.close()
+					d.bufs.Put(bufs)
 					return nil, ErrMaxDecodedSizeExceeded
 				}
 				dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			off += 4
 			if off == 0 {
 				if len(dst)+256 > maxDecodedSize {
+					d.bufs.Put(bufs)
 					br.close()
 					return nil, ErrMaxDecodedSizeExceeded
 				}
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 			}
 		}
 	default:
+		d.bufs.Put(bufs)
 		return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
 	}

 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 		}
 		if len(dst) >= maxDecodedSize {
 			br.close()
+			d.bufs.Put(bufs)
 			return nil, ErrMaxDecodedSizeExceeded
 		}
 		v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
 		bitsLeft -= int8(nBits)
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }

@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	dt := d.dt.single[:256]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	bufs := d.buffer()
+	buf := &bufs[0]
 	var off uint8

 	const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		off += 4
 		if off == 0 {
 			if len(dst)+256 > maxDecodedSize {
+				d.bufs.Put(bufs)
 				br.close()
 				return nil, ErrMaxDecodedSizeExceeded
 			}
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	}

 	if len(dst)+int(off) > maxDecodedSize {
+		d.bufs.Put(bufs)
 		br.close()
 		return nil, ErrMaxDecodedSizeExceeded
 	}
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		}
 		}
 		if len(dst) >= maxDecodedSize {
+			d.bufs.Put(bufs)
 			br.close()
 			return nil, ErrMaxDecodedSizeExceeded
 		}
@@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 		bitsLeft -= int8(nBits)
 		dst = append(dst, uint8(v.entry>>8))
 	}
+	d.bufs.Put(bufs)
 	return dst, br.close()
 }

@@ -735,12 +769,12 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int

 	// Decode 2 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
@@ -758,8 +792,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 := single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)

 			val = br[stream].peekBitsFast(d.actualTableLog)
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -767,8 +801,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 = single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
 		}

 		{
@@ -783,8 +817,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 := single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
+			buf[stream][off] = uint8(v.entry >> 8)
+			buf[stream2][off] = uint8(v2.entry >> 8)

 			val = br[stream].peekBitsFast(d.actualTableLog)
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -792,25 +826,26 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			v2 = single[val2&tlMask]
 			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
+			buf[stream][off+1] = uint8(v.entry >> 8)
+			buf[stream2][off+1] = uint8(v2.entry >> 8)
 		}

 		off += 2

-		if off == bufoff {
+		if off == 0 {
 			if bufoff > dstEvery {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[:bufoff])
-			copy(out[dstEvery:], buf[bufoff:bufoff*2])
-			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
 			if len(out) < dstEvery*3 {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
 		}
@@ -818,12 +853,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 	if off > 0 {
 		ioff := int(off)
 		if len(out) < dstEvery*3+ioff {
+			d.bufs.Put(buf)
 			return nil, errors.New("corruption detected: stream overrun 3")
 		}
-		copy(out, buf[:off])
-		copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-		copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-		copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+		copy(out, buf[0][:off])
+		copy(out[dstEvery:], buf[1][:off])
+		copy(out[dstEvery*2:], buf[2][:off])
+		copy(out[dstEvery*3:], buf[3][:off])
 		decoded += int(off) * 4
 		out = out[off:]
 	}
@@ -853,6 +889,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			}
 			// end inline...
 			if offset >= len(out) {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}

@@ -871,6 +908,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			return nil, err
 		}
 	}
+	d.bufs.Put(buf)
 	if dstSize != decoded {
 		return nil, errors.New("corruption detected: short output block")
 	}
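The Decompress4X rewrite replaces the single flat 256-byte scratch buffer, which each stream sliced by offset, with one full 256-byte row per stream: indexing becomes buf[stream][off], bufoff grows from 64 to 256, and a flush now copies 4×256 bytes. A toy comparison of the two layouts:

package main

import "fmt"

func main() {
	const streams = 4

	// Old layout: one flat buffer, each stream owns a 64-byte quarter (bufoff = 256/4).
	var flat [256]byte
	const oldBufoff = 256 / 4
	stream, off := 2, 5
	flat[off+oldBufoff*stream] = 0xAB

	// New layout: one full 256-byte row per stream (bufoff = 256), indexed directly.
	var rows [streams][256]byte
	rows[stream][off] = 0xAB

	fmt.Println(flat[off+oldBufoff*stream] == rows[stream][off]) // true
}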
@@ -916,12 +954,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	single := d.dt.single[:tlSize]

 	// Use temp table to avoid bound checks/append penalty.
-	var buf [256]byte
+	buf := d.buffer()
 	var off uint8
 	var decoded int

 	// Decode 4 values from each decoder/loop.
-	const bufoff = 256 / 4
+	const bufoff = 256
 	for {
 		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 			break
@@ -942,8 +980,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +989,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +998,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +1007,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}

 		{
@@ -987,8 +1025,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
+			buf[stream][off] = uint8(v >> 8)
+			buf[stream2][off] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +1034,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+1] = uint8(v >> 8)
-			buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+			buf[stream][off+1] = uint8(v >> 8)
+			buf[stream2][off+1] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +1043,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream+2] = uint8(v >> 8)
-			buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+			buf[stream][off+2] = uint8(v >> 8)
+			buf[stream2][off+2] = uint8(v2 >> 8)

 			v = single[uint8(br1.value>>shift)].entry
 			v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,25 +1052,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			br1.value <<= v & 63
 			br2.bitsRead += uint8(v2)
 			br2.value <<= v2 & 63
-			buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
-			buf[off+bufoff*stream+3] = uint8(v >> 8)
+			buf[stream][off+3] = uint8(v >> 8)
+			buf[stream2][off+3] = uint8(v2 >> 8)
 		}

 		off += 4

-		if off == bufoff {
+		if off == 0 {
 			if bufoff > dstEvery {
+				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[:bufoff])
-			copy(out[dstEvery:], buf[bufoff:bufoff*2])
-			copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-			copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-			off = 0
+			copy(out, buf[0][:])
+			copy(out[dstEvery:], buf[1][:])
+			copy(out[dstEvery*2:], buf[2][:])
+			copy(out[dstEvery*3:], buf[3][:])
 			out = out[bufoff:]
-			decoded += 256
+			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
|
// There must at least be 3 buffers left.
|
||||||
if len(out) < dstEvery*3 {
|
if len(out) < dstEvery*3 {
|
||||||
|
d.bufs.Put(buf)
|
||||||
return nil, errors.New("corruption detected: stream overrun 2")
|
return nil, errors.New("corruption detected: stream overrun 2")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
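Aside on the hunk above: `off` is declared as `var off uint8`, so with `bufoff` raised from 64 (`256 / 4`) to 256 the counter now wraps back to zero exactly when the four per-stream buffers (a `*[4][256]byte`, per the pool comment added further down in this diff) are full. That is why the explicit `off = 0` reset disappears and `if off == bufoff` becomes `if off == 0`, and why each flush now accounts for `bufoff * 4` (1024) bytes instead of 256. The standalone sketch below is not part of the patch; it only illustrates the wraparound the new code relies on.

package main

import "fmt"

func main() {
	// off mirrors the decoder's counter: a uint8 bumped by 4 per loop.
	// With bufoff == 256 it overflows back to 0 exactly when 256 bytes
	// per stream have been buffered, so no explicit "off = 0" is needed.
	var off uint8
	flushes := 0
	for i := 0; i < 256; i++ {
		off += 4
		if off == 0 { // replaces the old "if off == bufoff" check
			flushes++ // the decoder copies buf[0..3] to the output here
		}
	}
	fmt.Println("flushes:", flushes) // 4 flushes of 4*256 = 1024 bytes each
}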
@@ -1040,12 +1079,13 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 if off > 0 {
 ioff := int(off)
 if len(out) < dstEvery*3+ioff {
+d.bufs.Put(buf)
 return nil, errors.New("corruption detected: stream overrun 3")
 }
-copy(out, buf[:off])
-copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+copy(out, buf[0][:off])
+copy(out[dstEvery:], buf[1][:off])
+copy(out[dstEvery*2:], buf[2][:off])
+copy(out[dstEvery*3:], buf[3][:off])
 decoded += int(off) * 4
 out = out[off:]
 }

@@ -1057,6 +1097,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 bitsLeft := int(br.off*8) + int(64-br.bitsRead)
 for bitsLeft > 0 {
 if br.finished() {
+d.bufs.Put(buf)
 return nil, io.ErrUnexpectedEOF
 }
 if br.bitsRead >= 56 {

@@ -1077,6 +1118,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 }
 // end inline...
 if offset >= len(out) {
+d.bufs.Put(buf)
 return nil, errors.New("corruption detected: stream overrun 4")
 }

@@ -1091,9 +1133,11 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 decoded += offset - dstEvery*i
 err = br.close()
 if err != nil {
+d.bufs.Put(buf)
 return nil, err
 }
 }
+d.bufs.Put(buf)
 if dstSize != decoded {
 return nil, errors.New("corruption detected: short output block")
 }

@@ -1135,12 +1179,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 single := d.dt.single[:tlSize]

 // Use temp table to avoid bound checks/append penalty.
-var buf [256]byte
+buf := d.buffer()
 var off uint8
 var decoded int

 // Decode 4 values from each decoder/loop.
-const bufoff = 256 / 4
+const bufoff = 256
 for {
 if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
 break

@@ -1150,104 +1194,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 // Interleave 2 decodes.
 const stream = 0
 const stream2 = 1
-br[stream].fillFast()
-br[stream2].fillFast()
+br1 := &br[stream]
+br2 := &br[stream2]
+br1.fillFast()
+br2.fillFast()

-v := single[uint8(br[stream].value>>shift)].entry
-v2 := single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream] = uint8(v >> 8)
-buf[off+bufoff*stream2] = uint8(v2 >> 8)
+v := single[uint8(br1.value>>shift)].entry
+v2 := single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off] = uint8(v >> 8)
+buf[stream2][off] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+1] = uint8(v >> 8)
-buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+1] = uint8(v >> 8)
+buf[stream2][off+1] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+2] = uint8(v >> 8)
-buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+2] = uint8(v >> 8)
+buf[stream2][off+2] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+3] = uint8(v >> 8)
-buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+3] = uint8(v >> 8)
+buf[stream2][off+3] = uint8(v2 >> 8)
 }

 {
 const stream = 2
 const stream2 = 3
-br[stream].fillFast()
-br[stream2].fillFast()
+br1 := &br[stream]
+br2 := &br[stream2]
+br1.fillFast()
+br2.fillFast()

-v := single[uint8(br[stream].value>>shift)].entry
-v2 := single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream] = uint8(v >> 8)
-buf[off+bufoff*stream2] = uint8(v2 >> 8)
+v := single[uint8(br1.value>>shift)].entry
+v2 := single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off] = uint8(v >> 8)
+buf[stream2][off] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+1] = uint8(v >> 8)
-buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+1] = uint8(v >> 8)
+buf[stream2][off+1] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+2] = uint8(v >> 8)
-buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+2] = uint8(v >> 8)
+buf[stream2][off+2] = uint8(v2 >> 8)

-v = single[uint8(br[stream].value>>shift)].entry
-v2 = single[uint8(br[stream2].value>>shift)].entry
-br[stream].bitsRead += uint8(v)
-br[stream].value <<= v & 63
-br[stream2].bitsRead += uint8(v2)
-br[stream2].value <<= v2 & 63
-buf[off+bufoff*stream+3] = uint8(v >> 8)
-buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+v = single[uint8(br1.value>>shift)].entry
+v2 = single[uint8(br2.value>>shift)].entry
+br1.bitsRead += uint8(v)
+br1.value <<= v & 63
+br2.bitsRead += uint8(v2)
+br2.value <<= v2 & 63
+buf[stream][off+3] = uint8(v >> 8)
+buf[stream2][off+3] = uint8(v2 >> 8)
 }

 off += 4

-if off == bufoff {
+if off == 0 {
 if bufoff > dstEvery {
+d.bufs.Put(buf)
 return nil, errors.New("corruption detected: stream overrun 1")
 }
-copy(out, buf[:bufoff])
-copy(out[dstEvery:], buf[bufoff:bufoff*2])
-copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
-copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
-off = 0
+copy(out, buf[0][:])
+copy(out[dstEvery:], buf[1][:])
+copy(out[dstEvery*2:], buf[2][:])
+copy(out[dstEvery*3:], buf[3][:])
 out = out[bufoff:]
-decoded += 256
+decoded += bufoff * 4
 // There must at least be 3 buffers left.
 if len(out) < dstEvery*3 {
+d.bufs.Put(buf)
 return nil, errors.New("corruption detected: stream overrun 2")
 }
 }

@@ -1257,10 +1306,10 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 if len(out) < dstEvery*3+ioff {
 return nil, errors.New("corruption detected: stream overrun 3")
 }
-copy(out, buf[:off])
-copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
-copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
-copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+copy(out, buf[0][:off])
+copy(out[dstEvery:], buf[1][:off])
+copy(out[dstEvery*2:], buf[2][:off])
+copy(out[dstEvery*3:], buf[3][:off])
 decoded += int(off) * 4
 out = out[off:]
 }

@@ -1272,6 +1321,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 bitsLeft := int(br.off*8) + int(64-br.bitsRead)
 for bitsLeft > 0 {
 if br.finished() {
+d.bufs.Put(buf)
 return nil, io.ErrUnexpectedEOF
 }
 if br.bitsRead >= 56 {

@@ -1292,6 +1342,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 }
 // end inline...
 if offset >= len(out) {
+d.bufs.Put(buf)
 return nil, errors.New("corruption detected: stream overrun 4")
 }

@@ -1306,9 +1357,11 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 decoded += offset - dstEvery*i
 err = br.close()
 if err != nil {
+d.bufs.Put(buf)
 return nil, err
 }
 }
+d.bufs.Put(buf)
 if dstSize != decoded {
 return nil, errors.New("corruption detected: short output block")
 }

@@ -8,6 +8,7 @@ import (
 "fmt"
 "math"
 "math/bits"
+"sync"

 "github.com/klauspost/compress/fse"
 )

@@ -116,6 +117,7 @@ type Scratch struct {
 nodes []nodeElt
 tmpOut [4][]byte
 fse *fse.Scratch
+decPool sync.Pool // *[4][256]byte buffers.
 huffWeight [maxSymbolValue + 1]byte
 }
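Aside on the pooling pattern: the new `decPool sync.Pool // *[4][256]byte buffers.` field and the `buf := d.buffer()` / `d.bufs.Put(buf)` pairs in the hunks above follow the usual sync.Pool recycle discipline, but the helper bodies themselves are not part of this diff. The sketch below is a minimal illustration under that assumption; `bufPool` and `decodeWith` are made-up names for this example, not identifiers from the library.

package main

import "sync"

// bufPool hands out *[4][256]byte scratch buffers, one 256-byte block per
// decoder stream, mirroring the comment on the new decPool field.
var bufPool = sync.Pool{
	New: func() interface{} { return new([4][256]byte) },
}

// decodeWith borrows a buffer and returns it on every exit path -- the same
// discipline the diff enforces by adding d.bufs.Put(buf) before each return.
func decodeWith(work func(buf *[4][256]byte) error) error {
	buf := bufPool.Get().(*[4][256]byte) // reuse a previous buffer or allocate
	if err := work(buf); err != nil {
		bufPool.Put(buf) // do not leak the buffer on error paths
		return err
	}
	bufPool.Put(buf) // hand it back for the next decode
	return nil
}

func main() {
	_ = decodeWith(func(buf *[4][256]byte) error {
		buf[0][0] = 42 // stand-in for the per-stream writes buf[stream][off] = ...
		return nil
	})
}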
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
 // TEMPLATE
 const hashLog = tableBits
 // seems global, but would be nice to tweak.
-const kSearchStrength = 7
+const kSearchStrength = 6

 // nextEmit is where in src the next emitLiteral should start from.
 nextEmit := s

@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
 // TEMPLATE
 const hashLog = tableBits
 // seems global, but would be nice to tweak.
-const kSearchStrength = 8
+const kSearchStrength = 6

 // nextEmit is where in src the next emitLiteral should start from.
 nextEmit := s

@@ -55,8 +55,8 @@ github.com/Azure/go-autorest/logger
 # github.com/Azure/go-autorest/tracing v0.6.0
 ## explicit; go 1.12
 github.com/Azure/go-autorest/tracing
-# github.com/Microsoft/go-winio v0.5.1
-## explicit; go 1.12
+# github.com/Microsoft/go-winio v0.5.2
+## explicit; go 1.13
 github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/pkg/guid
 github.com/Microsoft/go-winio/pkg/security

@@ -294,7 +294,7 @@ github.com/containerd/continuity/sysx
 # github.com/containerd/fifo v1.0.0
 ## explicit; go 1.13
 github.com/containerd/fifo
-# github.com/containerd/stargz-snapshotter/estargz v0.10.1
+# github.com/containerd/stargz-snapshotter/estargz v0.11.1
 ## explicit; go 1.16
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil

@@ -319,7 +319,7 @@ github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
 github.com/docker/cli/cli/config/credentials
 github.com/docker/cli/cli/config/types
-# github.com/docker/distribution v2.7.1+incompatible
+# github.com/docker/distribution v2.8.0+incompatible
 ## explicit
 github.com/docker/distribution
 github.com/docker/distribution/digestset

@@ -539,8 +539,8 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.8.1-0.20220214202839-625fe7b4276a
-## explicit; go 1.14
+# github.com/google/go-containerregistry v0.8.1-0.20220507185902-82405e5dfa82
+## explicit; go 1.17
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/estargz
 github.com/google/go-containerregistry/internal/gzip

@@ -613,7 +613,7 @@ github.com/karrick/godirwalk
 # github.com/kevinburke/ssh_config v1.1.0
 ## explicit
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.14.2
+# github.com/klauspost/compress v1.14.4
 ## explicit; go 1.15
 github.com/klauspost/compress
 github.com/klauspost/compress/fse