chore(deps): bump golang.org/x/net from 0.20.0 to 0.21.0 (#2999)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.20.0 to 0.21.0.
- [Commits](https://github.com/golang/net/compare/v0.20.0...v0.21.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
parent 87ef541189
commit e67299c320
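A bump like this is normally produced with the Go toolchain; a minimal sketch, assuming the module vendors its dependencies (a vendor/ tree is updated in this commit), roughly equivalent to what dependabot applied:

    go get golang.org/x/net@v0.21.0
    go mod tidy
    go mod vendor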
go.mod — 9 changed lines
@@ -34,12 +34,12 @@ require (
	github.com/spf13/afero v1.11.0
	github.com/spf13/cobra v1.8.0
	github.com/spf13/pflag v1.0.5
	golang.org/x/net v0.20.0
	golang.org/x/net v0.21.0
	golang.org/x/oauth2 v0.16.0
	golang.org/x/sync v0.6.0
)

require github.com/containerd/containerd v1.7.12
require github.com/containerd/containerd v1.7.6

require (
	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect

@@ -137,7 +137,7 @@ require (
	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.uber.org/goleak v1.2.1 // indirect
	golang.org/x/crypto v0.18.0 // indirect
	golang.org/x/crypto v0.19.0 // indirect
	golang.org/x/sys v0.17.0
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/time v0.5.0 // indirect

@@ -156,7 +156,7 @@ require (
	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
	github.com/Microsoft/hcsshim v0.11.4 // indirect
	github.com/Microsoft/hcsshim v0.11.0 // indirect
	github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 // indirect
	github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.5 // indirect
	github.com/cloudflare/circl v1.3.7 // indirect

@@ -178,6 +178,7 @@ require (
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/moby/swarmkit/v2 v2.0.0-20230315203717-e28e8ba9bc83 // indirect
	github.com/moby/sys/user v0.1.0 // indirect
	github.com/opencontainers/runc v1.1.7 // indirect
	github.com/pelletier/go-toml/v2 v2.1.1 // indirect
	github.com/pjbgf/sha1cd v0.3.0 // indirect
	github.com/sagikazarmark/locafero v0.4.0 // indirect
go.sum — 31 changed lines
@ -58,8 +58,8 @@ github.com/GoogleCloudPlatform/docker-credential-gcr v1.5.1-0.20230328182921-62a
|
|||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7fz8=
|
||||
github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
|
||||
github.com/Microsoft/hcsshim v0.11.0 h1:7EFNIY4igHEXUdj1zXgAyU3fLc7QfOKHbkldRVTBdiM=
|
||||
github.com/Microsoft/hcsshim v0.11.0/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
|
||||
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
|
||||
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
|
|
@ -117,7 +117,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
|
|||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
|
|
@ -140,8 +140,8 @@ github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaD
|
|||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
|
||||
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
|
||||
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
|
||||
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
|
||||
github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8=
|
||||
github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4=
|
||||
github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
|
||||
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
|
||||
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
|
||||
|
|
@ -286,7 +286,7 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
|
|||
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
|
||||
github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
|
|
@ -372,6 +372,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
|||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
||||
github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk=
|
||||
github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50=
|
||||
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
|
||||
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||
|
|
@ -476,14 +478,15 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfa
|
|||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
|
||||
go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
|
||||
go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94=
|
||||
go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
|
||||
go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
|
||||
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||
go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
|
||||
go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
|
|
@ -502,8 +505,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
|
|||
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20231219160207-73b9e39aefca h1:+xQfFu/HO/82Wwg4zuJ5xiLp0yaOLJjBGnuafXp85YQ=
|
||||
golang.org/x/exp v0.0.0-20231219160207-73b9e39aefca/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
|
||||
|
|
@ -535,8 +538,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
|
|||
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
|
||||
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
|
||||
|
|
@ -587,7 +590,7 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
|||
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
|
||||
golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
|
||||
golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -0,0 +1,57 @@
/*
mkwinsyscall generates windows system call bodies

It parses all files specified on command line containing function
prototypes (like syscall_windows.go) and prints system call bodies
to standard output.

The prototypes are marked by lines beginning with "//sys" and read
like func declarations if //sys is replaced by func, but:

  - The parameter lists must give a name for each argument. This
    includes return parameters.

  - The parameter lists must give a type for each argument:
    the (x, y, z int) shorthand is not allowed.

  - If the return parameter is an error number, it must be named err.

  - If go func name needs to be different from its winapi dll name,
    the winapi name could be specified at the end, after "=" sign, like

    //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA

  - Each function that returns err needs to supply a condition, that
    return value of winapi will be tested against to detect failure.
    This would set err to windows "last-error", otherwise it will be nil.
    The value can be provided at end of //sys declaration, like

    //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA

    and is [failretval==0] by default.

  - If the function name ends in a "?", then the function not existing is non-
    fatal, and an error will be returned instead of panicking.

Usage:

	mkwinsyscall [flags] [path ...]

Flags

	-output string
		Output file name (standard output if omitted).
	-sort
		Sort DLL and function declarations (default true).
		Intended to help transition from older versions of mkwinsyscall by making diffs
		easier to read and understand.
	-systemdll
		Whether all DLLs should be loaded from the Windows system directory (default true).
	-trace
		Generate print statement after every syscall.
	-utf16
		Encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W' (default true).
	-winio
		Import this package ("github.com/Microsoft/go-winio").
*/
package main
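For illustration only (hypothetical file, package, and Go function names; not part of this commit): a minimal sketch of the input mkwinsyscall consumes. The //sys line would be expanded into a generated call body in zsyscall_windows.go, with the default [failretval==0] check mapping a FALSE return from kernel32's Beep to the Windows last-error.

// syscall_windows.go (hypothetical)
package mypkg

//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go

//sys beep(frequency uint32, duration uint32) (err error) = kernel32.Beep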
|
||||
vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go — 1059 lines, new generated vendored file (diff suppressed because it is too large)
@ -37,10 +37,6 @@ rootfs-conv/*
|
|||
deps/*
|
||||
out/*
|
||||
|
||||
# protobuf files
|
||||
# only files at root of the repo, otherwise this will cause issues with vendoring
|
||||
/protobuf/*
|
||||
|
||||
# test results
|
||||
test/results
|
||||
|
||||
|
|
|
|||
|
|
@ -135,14 +135,3 @@ issues:
|
|||
linters:
|
||||
- stylecheck
|
||||
Text: "ST1003:"
|
||||
|
||||
# v0 APIs are deprecated, but still retained for backwards compatability
|
||||
- path: cmd\\ncproxy\\
|
||||
linters:
|
||||
- staticcheck
|
||||
text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0"
|
||||
|
||||
- path: internal\\tools\\networkagent
|
||||
linters:
|
||||
- staticcheck
|
||||
text: "^SA1019: .*nodenetsvc[/]?v0"
|
||||
|
|
|
|||
|
|
@ -9,10 +9,6 @@ plugins = ["grpc", "fieldpath"]
|
|||
# treat the root of the project as an include, but this may not be necessary.
|
||||
before = ["./protobuf"]
|
||||
|
||||
# defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows.
|
||||
# override defaults to supress errors about non-existant directories.
|
||||
after = []
|
||||
|
||||
# Paths that should be treated as include roots in relation to the vendor
|
||||
# directory. These will be calculated with the vendor directory nearest the
|
||||
# target package.
|
||||
|
|
|
|||
|
|
@ -421,6 +421,12 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) {
|
|||
return makeProcessError(process, operation, ErrAlreadyClosed, nil)
|
||||
}
|
||||
|
||||
process.stdioLock.Lock()
|
||||
defer process.stdioLock.Unlock()
|
||||
if process.stdin == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
//HcsModifyProcess request to close stdin will fail if the process has already exited
|
||||
if !process.stopped() {
|
||||
modifyRequest := processModifyRequest{
|
||||
|
|
@ -442,12 +448,8 @@ func (process *Process) CloseStdin(ctx context.Context) (err error) {
|
|||
}
|
||||
}
|
||||
|
||||
process.stdioLock.Lock()
|
||||
defer process.stdioLock.Unlock()
|
||||
if process.stdin != nil {
|
||||
process.stdin.Close()
|
||||
process.stdin = nil
|
||||
}
|
||||
process.stdin.Close()
|
||||
process.stdin = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,13 +8,11 @@ import (
|
|||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
)
|
||||
|
||||
// TimeFormat is [time.RFC3339Nano] with nanoseconds padded using
|
||||
// zeros to ensure the formatted time is always the same number of
|
||||
// characters.
|
||||
// Based on RFC3339NanoFixed from github.com/containerd/log
|
||||
const TimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
const TimeFormat = log.RFC3339NanoFixed
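// Clarifying note (not part of the vendored file): time.RFC3339Nano trims
// trailing zeros from the fractional seconds (e.g. "2024-01-02T03:04:05.1Z"),
// whereas RFC3339NanoFixed pads to nine digits
// ("2024-01-02T03:04:05.100000000Z"), so formatted timestamps keep a constant
// width in logs.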
|
||||
|
||||
func FormatTime(t time.Time) string {
|
||||
return t.Format(TimeFormat)
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/Microsoft/hcsshim/internal/logfields"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
|
@ -29,7 +30,7 @@ type Hook struct {
|
|||
// An empty string disables formatting.
|
||||
// When disabled, the fall back will the JSON encoding, if enabled.
|
||||
//
|
||||
// Default is [TimeFormat].
|
||||
// Default is [github.com/containerd/containerd/log.RFC3339NanoFixed].
|
||||
TimeFormat string
|
||||
|
||||
// Duration format converts a [time.Duration] fields to an appropriate encoding.
|
||||
|
|
@ -48,7 +49,7 @@ var _ logrus.Hook = &Hook{}
|
|||
|
||||
func NewHook() *Hook {
|
||||
return &Hook{
|
||||
TimeFormat: TimeFormat,
|
||||
TimeFormat: log.RFC3339NanoFixed,
|
||||
DurationFormat: DurationFormatString,
|
||||
AddSpanContext: true,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,5 @@
|
|||
//go:build tools
|
||||
|
||||
package hcsshim
|
||||
|
||||
import _ "github.com/Microsoft/go-winio/tools/mkwinsyscall"
|
||||
|
|
@ -33,15 +33,6 @@ issues:
|
|||
# conversion is necessary on Linux, unnecessary on macOS
|
||||
text: "unnecessary conversion"
|
||||
|
||||
# FIXME temporarily suppress deprecation warnings for the logs package. See https://github.com/containerd/containerd/pull/9086
|
||||
- text: "SA1019: log\\.(G|L|Fields|Entry|RFC3339NanoFixed|Level|TraceLevel|DebugLevel|InfoLevel|WarnLevel|ErrorLevel|FatalLevel|PanicLevel|SetLevel|GetLevel|OutputFormat|TextFormat|JSONFormat|SetFormat|WithLogger|GetLogger)"
|
||||
linters:
|
||||
- staticcheck
|
||||
- text: "SA1019: logtest\\.WithT"
|
||||
linters:
|
||||
- staticcheck
|
||||
|
||||
|
||||
linters-settings:
|
||||
gosec:
|
||||
# The following issues surfaced when `gosec` linter
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ haoyun <yun.hao@daocloud.io>
|
|||
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com> <hushuaiia@qq.com>
|
||||
Iceber Gu <wei.cai-nat@daocloud.io> <caiwei95@hotmail.com>
|
||||
Iceber Gu <wei.cai-nat@daocloud.io>
|
||||
Jaana Burcu Dogan <burcujdogan@gmail.com> <jbd@golang.org>
|
||||
Jess Valarezo <valarezo.jessica@gmail.com>
|
||||
Jess Valarezo <valarezo.jessica@gmail.com> <jessica.valarezo@docker.com>
|
||||
|
|
|
|||
|
|
@ -234,11 +234,6 @@ bin/cni-bridge-fp: integration/failpoint/cmd/cni-bridge-fp FORCE
|
|||
@echo "$(WHALE) $@"
|
||||
@$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/cni-bridge-fp
|
||||
|
||||
# build runc-fp as runc wrapper to support failpoint, only used by integration test
|
||||
bin/runc-fp: integration/failpoint/cmd/runc-fp FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) build ${GO_BUILD_FLAGS} -o $@ ./integration/failpoint/cmd/runc-fp
|
||||
|
||||
benchmark: ## run benchmarks tests
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) test ${TESTFLAGS} -bench . -run Benchmark -test.root
|
||||
|
|
|
|||
|
|
@ -102,7 +102,7 @@ EOF
|
|||
config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-golang"
|
||||
sh.env = {
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.20.13",
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.20.8",
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
|
|
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go — 262 changed lines (generated, vendored)
@ -27,7 +27,6 @@ import (
|
|||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
|
@ -273,10 +272,9 @@ type ServerResponse struct {
|
|||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
||||
Pid uint64 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Pidns uint64 `protobuf:"varint,3,opt,name=pidns,proto3" json:"pidns,omitempty"` // PID namespace, such as 4026531836
|
||||
Deprecations []*DeprecationWarning `protobuf:"bytes,4,rep,name=deprecations,proto3" json:"deprecations,omitempty"`
|
||||
UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
|
||||
Pid uint64 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Pidns uint64 `protobuf:"varint,3,opt,name=pidns,proto3" json:"pidns,omitempty"` // PID namespace, such as 4026531836
|
||||
}
|
||||
|
||||
func (x *ServerResponse) Reset() {
|
||||
|
|
@ -332,76 +330,6 @@ func (x *ServerResponse) GetPidns() uint64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
func (x *ServerResponse) GetDeprecations() []*DeprecationWarning {
|
||||
if x != nil {
|
||||
return x.Deprecations
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeprecationWarning struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
LastOccurrence *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_occurrence,json=lastOccurrence,proto3" json:"last_occurrence,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeprecationWarning) Reset() {
|
||||
*x = DeprecationWarning{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeprecationWarning) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeprecationWarning) ProtoMessage() {}
|
||||
|
||||
func (x *DeprecationWarning) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeprecationWarning.ProtoReflect.Descriptor instead.
|
||||
func (*DeprecationWarning) Descriptor() ([]byte, []int) {
|
||||
return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *DeprecationWarning) GetID() string {
|
||||
if x != nil {
|
||||
return x.ID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DeprecationWarning) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *DeprecationWarning) GetLastOccurrence() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.LastOccurrence
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = []byte{
|
||||
|
|
@ -419,79 +347,63 @@ var file_github_com_containerd_containerd_api_services_introspection_v1_introspe
|
|||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
|
||||
0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
|
||||
0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a,
|
||||
0x06, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72,
|
||||
0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72,
|
||||
0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66,
|
||||
0x6f, 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e,
|
||||
0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c,
|
||||
0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d,
|
||||
0x73, 0x12, 0x53, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03,
|
||||
0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70,
|
||||
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
|
||||
0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65,
|
||||
0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69,
|
||||
0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61,
|
||||
0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e,
|
||||
0x69, 0x74, 0x5f, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x07, 0x69, 0x6e, 0x69, 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70,
|
||||
0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65,
|
||||
0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
|
||||
0x73, 0x22, 0x59, 0x0a, 0x0f, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18,
|
||||
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72,
|
||||
0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75,
|
||||
0x67, 0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0xaa, 0x01, 0x0a,
|
||||
0x0e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75,
|
||||
0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04,
|
||||
0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03,
|
||||
0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x12, 0x5c, 0x0a, 0x0c, 0x64,
|
||||
0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
|
||||
0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x06,
|
||||
0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
|
||||
0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
|
||||
0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f,
|
||||
0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74,
|
||||
0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61,
|
||||
0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73,
|
||||
0x12, 0x53, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65,
|
||||
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0c, 0x64, 0x65, 0x70,
|
||||
0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x65,
|
||||
0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
|
||||
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
|
||||
0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x0f, 0x6c, 0x61,
|
||||
0x73, 0x74, 0x5f, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
|
||||
0x0e, 0x6c, 0x61, 0x73, 0x74, 0x4f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x32,
|
||||
0xdf, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x12, 0x76, 0x0a, 0x07, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63,
|
||||
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e,
|
||||
0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x78,
|
||||
0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c,
|
||||
0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x70,
|
||||
0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e, 0x69,
|
||||
0x74, 0x5f, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
|
||||
0x07, 0x69, 0x6e, 0x69, 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f,
|
||||
0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
|
||||
0x22, 0x59, 0x0a, 0x0f, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
|
||||
0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f,
|
||||
0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67,
|
||||
0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x0e, 0x53,
|
||||
0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69,
|
||||
0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03,
|
||||
0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x04, 0x52, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x32, 0xdf, 0x01, 0x0a, 0x0d, 0x49, 0x6e,
|
||||
0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x76, 0x0a, 0x07, 0x50,
|
||||
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
|
||||
0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74,
|
||||
0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c,
|
||||
0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63,
|
||||
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70,
|
||||
0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72,
|
||||
0x76, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f,
|
||||
0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61,
|
||||
0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
|
||||
0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
||||
0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
|
||||
0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65,
|
||||
0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72,
|
||||
0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72,
|
||||
0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
|
||||
0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74,
|
||||
0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6e,
|
||||
0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
@ -506,35 +418,31 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
|
|||
return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
|
||||
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = []interface{}{
|
||||
(*Plugin)(nil), // 0: containerd.services.introspection.v1.Plugin
|
||||
(*PluginsRequest)(nil), // 1: containerd.services.introspection.v1.PluginsRequest
|
||||
(*PluginsResponse)(nil), // 2: containerd.services.introspection.v1.PluginsResponse
|
||||
(*ServerResponse)(nil), // 3: containerd.services.introspection.v1.ServerResponse
|
||||
(*DeprecationWarning)(nil), // 4: containerd.services.introspection.v1.DeprecationWarning
|
||||
nil, // 5: containerd.services.introspection.v1.Plugin.ExportsEntry
|
||||
(*types.Platform)(nil), // 6: containerd.types.Platform
|
||||
(*status.Status)(nil), // 7: google.rpc.Status
|
||||
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
|
||||
(*emptypb.Empty)(nil), // 9: google.protobuf.Empty
|
||||
(*Plugin)(nil), // 0: containerd.services.introspection.v1.Plugin
|
||||
(*PluginsRequest)(nil), // 1: containerd.services.introspection.v1.PluginsRequest
|
||||
(*PluginsResponse)(nil), // 2: containerd.services.introspection.v1.PluginsResponse
|
||||
(*ServerResponse)(nil), // 3: containerd.services.introspection.v1.ServerResponse
|
||||
nil, // 4: containerd.services.introspection.v1.Plugin.ExportsEntry
|
||||
(*types.Platform)(nil), // 5: containerd.types.Platform
|
||||
(*status.Status)(nil), // 6: google.rpc.Status
|
||||
(*emptypb.Empty)(nil), // 7: google.protobuf.Empty
|
||||
}
|
||||
var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = []int32{
|
||||
6, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> containerd.types.Platform
|
||||
5, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry
|
||||
7, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status
|
||||
5, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> containerd.types.Platform
|
||||
4, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry
|
||||
6, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status
|
||||
0, // 3: containerd.services.introspection.v1.PluginsResponse.plugins:type_name -> containerd.services.introspection.v1.Plugin
|
||||
4, // 4: containerd.services.introspection.v1.ServerResponse.deprecations:type_name -> containerd.services.introspection.v1.DeprecationWarning
|
||||
8, // 5: containerd.services.introspection.v1.DeprecationWarning.last_occurrence:type_name -> google.protobuf.Timestamp
|
||||
1, // 6: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest
|
||||
9, // 7: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty
|
||||
2, // 8: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse
|
||||
3, // 9: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse
|
||||
8, // [8:10] is the sub-list for method output_type
|
||||
6, // [6:8] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
1, // 4: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest
|
||||
7, // 5: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty
|
||||
2, // 6: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse
|
||||
3, // 7: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse
|
||||
6, // [6:8] is the sub-list for method output_type
|
||||
4, // [4:6] is the sub-list for method input_type
|
||||
4, // [4:4] is the sub-list for extension type_name
|
||||
4, // [4:4] is the sub-list for extension extendee
|
||||
0, // [0:4] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
@ -593,18 +501,6 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
|
|||
return nil
|
||||
}
|
||||
}
|
||||
file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeprecationWarning); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
|
|
@ -612,7 +508,7 @@ func file_github_com_containerd_containerd_api_services_introspection_v1_introsp
|
|||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 6,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ package containerd.services.introspection.v1;
|
|||
import "github.com/containerd/containerd/api/types/platform.proto";
|
||||
import "google/rpc/status.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
|
||||
|
||||
|
|
@ -103,11 +102,4 @@ message ServerResponse {
|
|||
string uuid = 1;
|
||||
uint64 pid = 2;
|
||||
uint64 pidns = 3; // PID namespace, such as 4026531836
|
||||
repeated DeprecationWarning deprecations = 4;
|
||||
}
|
||||
|
||||
message DeprecationWarning {
|
||||
string id = 1;
|
||||
string message = 2;
|
||||
google.protobuf.Timestamp last_occurrence = 3;
|
||||
}
|
||||
|
|
@ -25,12 +25,12 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
type (
|
||||
|
|
|
|||
|
|
@ -332,14 +332,3 @@ func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Exists returns whether an attempt to access the content would not error out
|
||||
// with an ErrNotFound error. It will return an encountered error if it was
|
||||
// different than ErrNotFound.
|
||||
func Exists(ctx context.Context, provider InfoProvider, desc ocispec.Descriptor) (bool, error) {
|
||||
_, err := provider.Info(ctx, desc.Digest)
|
||||
if errdefs.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return err == nil, err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,12 +25,12 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/protobuf"
|
||||
"github.com/containerd/containerd/protobuf/proto"
|
||||
"github.com/containerd/typeurl/v2"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// NewBinaryProcessor returns a binary processor for use with processing content streams
|
||||
|
|
|
|||
|
|
@ -23,7 +23,6 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
|
|
@ -32,6 +31,7 @@ import (
|
|||
"github.com/containerd/containerd/protobuf/proto"
|
||||
"github.com/containerd/typeurl/v2"
|
||||
"github.com/sirupsen/logrus"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
const processorPipe = "STREAM_PROCESSOR_PIPE"
|
||||
|
|
|
|||
|
|
@ -437,15 +437,7 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer,
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve rootfs: %w", err)
|
||||
}
|
||||
|
||||
// parse out the image layers from oci artifact layers
|
||||
imageLayers := []ocispec.Descriptor{}
|
||||
for _, ociLayer := range manifest.Layers {
|
||||
if images.IsLayerType(ociLayer.MediaType) {
|
||||
imageLayers = append(imageLayers, ociLayer)
|
||||
}
|
||||
}
|
||||
if len(diffIDs) != len(imageLayers) {
|
||||
if len(diffIDs) != len(manifest.Layers) {
|
||||
return nil, errors.New("mismatched image rootfs and manifest layers")
|
||||
}
|
||||
layers := make([]rootfs.Layer, len(diffIDs))
|
||||
|
|
@ -455,7 +447,7 @@ func (i *image) getLayers(ctx context.Context, platform platforms.MatchComparer,
|
|||
MediaType: ocispec.MediaTypeImageLayer,
|
||||
Digest: diffIDs[i],
|
||||
}
|
||||
layers[i].Blob = imageLayers[i]
|
||||
layers[i].Blob = manifest.Layers[i]
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,14 +24,11 @@ import (
|
|||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/labels"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
|
@ -143,45 +140,6 @@ func WithSkipNonDistributableBlobs() ExportOpt {
|
|||
return WithBlobFilter(f)
|
||||
}
|
||||
|
||||
// WithSkipMissing excludes blobs referenced by manifests if not all blobs
|
||||
// would be included in the archive.
|
||||
// The manifest itself is excluded only if it's not present locally.
|
||||
// This allows to export multi-platform images if not all platforms are present
|
||||
// while still persisting the multi-platform index.
|
||||
func WithSkipMissing(store ContentProvider) ExportOpt {
|
||||
return func(ctx context.Context, o *exportOptions) error {
|
||||
o.blobRecordOptions.childrenHandler = images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
|
||||
children, err := images.Children(ctx, store, desc)
|
||||
if !images.IsManifestType(desc.MediaType) {
|
||||
return children, err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// If manifest itself is missing, skip it from export.
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil, images.ErrSkipDesc
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Don't export manifest descendants if any of them doesn't exist.
|
||||
for _, child := range children {
|
||||
exists, err := content.Exists(ctx, store, child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If any child is missing, only export the manifest, but don't export its descendants.
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return children, nil
|
||||
})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func addNameAnnotation(name string, base map[string]string) map[string]string {
|
||||
annotations := map[string]string{}
|
||||
for k, v := range base {
|
||||
|
|
@ -194,29 +152,6 @@ func addNameAnnotation(name string, base map[string]string) map[string]string {
|
|||
return annotations
|
||||
}
|
||||
|
||||
func copySourceLabels(ctx context.Context, infoProvider content.InfoProvider, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||
info, err := infoProvider.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return desc, err
|
||||
}
|
||||
for k, v := range info.Labels {
|
||||
if strings.HasPrefix(k, labels.LabelDistributionSource) {
|
||||
if desc.Annotations == nil {
|
||||
desc.Annotations = map[string]string{k: v}
|
||||
} else {
|
||||
desc.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// ContentProvider provides both content and info about content
|
||||
type ContentProvider interface {
|
||||
content.Provider
|
||||
content.InfoProvider
|
||||
}
|
||||
|
||||
// Export implements Exporter.
|
||||
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
|
||||
var eo exportOptions
|
||||
|
|
@ -228,27 +163,15 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
|
|||
|
||||
records := []tarRecord{
|
||||
ociLayoutFile(""),
|
||||
}
|
||||
|
||||
manifests := make([]ocispec.Descriptor, 0, len(eo.manifests))
|
||||
if infoProvider, ok := store.(content.InfoProvider); ok {
|
||||
for _, desc := range eo.manifests {
|
||||
d, err := copySourceLabels(ctx, infoProvider, desc)
|
||||
if err != nil {
|
||||
log.G(ctx).WithError(err).WithField("desc", desc).Warn("failed to copy distribution.source labels")
|
||||
continue
|
||||
}
|
||||
manifests = append(manifests, d)
|
||||
}
|
||||
} else {
|
||||
manifests = append(manifests, eo.manifests...)
|
||||
ociIndexRecord(eo.manifests),
|
||||
}
|
||||
|
||||
algorithms := map[string]struct{}{}
|
||||
dManifests := map[digest.Digest]*exportManifest{}
|
||||
resolvedIndex := map[digest.Digest]digest.Digest{}
|
||||
for _, desc := range manifests {
|
||||
if images.IsManifestType(desc.MediaType) {
|
||||
for _, desc := range eo.manifests {
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
mt, ok := dManifests[desc.Digest]
|
||||
if !ok {
|
||||
// TODO(containerd): Skip if already added
|
||||
|
|
@ -268,7 +191,7 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
|
|||
if name != "" {
|
||||
mt.names = append(mt.names, name)
|
||||
}
|
||||
} else if images.IsIndexType(desc.MediaType) {
|
||||
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
d, ok := resolvedIndex[desc.Digest]
|
||||
if !ok {
|
||||
if err := desc.Digest.Validate(); err != nil {
|
||||
|
|
@ -332,13 +255,11 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
|
|||
}
|
||||
|
||||
}
|
||||
} else {
|
||||
default:
|
||||
return fmt.Errorf("only manifests may be exported: %w", errdefs.ErrInvalidArgument)
|
||||
}
|
||||
}
|
||||
|
||||
records = append(records, ociIndexRecord(manifests))
|
||||
|
||||
if !eo.skipDockerManifest && len(dManifests) > 0 {
|
||||
tr, err := manifestsRecord(ctx, store, dManifests)
|
||||
if err != nil {
|
||||
|
|
@ -371,10 +292,7 @@ func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descri
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
childrenHandler := brOpts.childrenHandler
|
||||
if childrenHandler == nil {
|
||||
childrenHandler = images.ChildrenHandler(store)
|
||||
}
|
||||
childrenHandler := images.ChildrenHandler(store)
|
||||
|
||||
handlers := images.Handlers(
|
||||
childrenHandler,
|
||||
|
|
@ -396,8 +314,7 @@ type tarRecord struct {
|
|||
}
|
||||
|
||||
type blobRecordOptions struct {
|
||||
blobFilter BlobFilter
|
||||
childrenHandler images.HandlerFunc
|
||||
blobFilter BlobFilter
|
||||
}
|
||||
|
||||
func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOptions) tarRecord {
|
||||
|
|
|
|||
|
|
@@ -1,21 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package images

const (
ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1"
)

@@ -39,7 +39,6 @@ type importOpts struct {
platformMatcher platforms.MatchComparer
compress bool
discardLayers bool
skipMissing bool
}

// ImportOpt allows the caller to specify import specific options

@@ -116,15 +115,6 @@ func WithDiscardUnpackedLayers() ImportOpt {
}
}

// WithSkipMissing allows to import an archive which doesn't contain all the
// referenced blobs.
func WithSkipMissing() ImportOpt {
return func(c *importOpts) error {
c.skipMissing = true
return nil
}
}

// Import imports an image from a Tar stream using reader.
// Caller needs to specify importer. Future version may use oci.v1 as the default.
// Note that unreferenced blobs may be imported to the content store as well.

@@ -174,12 +164,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
// Only save images at top level
if desc.Digest != index.Digest {
// Don't set labels on missing content.
children, err := images.Children(ctx, cs, desc)
if iopts.skipMissing && errdefs.IsNotFound(err) {
return nil, images.ErrSkipDesc
}
return children, err
return images.Children(ctx, cs, desc)
}

p, err := content.ReadBlob(ctx, cs, desc)
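Illustrative usage sketch (not part of the diff) for the WithSkipMissing option defined above; the socket path, namespace, and archive name are assumptions.

package main

import (
	"context"
	"log"
	"os"

	containerd "github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "default")

	f, err := os.Open("image.tar") // archive that may omit some referenced blobs
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// WithSkipMissing makes the import handler return images.ErrSkipDesc for
	// descriptors whose blobs are absent instead of failing the whole import.
	imgs, err := client.Import(ctx, f, containerd.WithSkipMissing())
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Println("imported:", img.Name)
	}
}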
@@ -0,0 +1,182 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package log provides types and functions related to logging, passing
// loggers through a context, and attaching context to the logger.
//
// # Transitional types
//
// This package contains various types that are aliases for types in [logrus].
// These aliases are intended for transitioning away from hard-coding logrus
// as logging implementation. Consumers of this package are encouraged to use
// the type-aliases from this package instead of directly using their logrus
// equivalent.
//
// The intent is to replace these aliases with locally defined types and
// interfaces once all consumers are no longer directly importing logrus
// types.
//
// IMPORTANT: due to the transitional purpose of this package, it is not
// guaranteed for the full logrus API to be provided in the future. As
// outlined, these aliases are provided as a step to transition away from
// a specific implementation which, as a result, exposes the full logrus API.
// While no decisions have been made on the ultimate design and interface
// provided by this package, we do not expect carrying "less common" features.
package log

import (
"context"
"fmt"

"github.com/sirupsen/logrus"
)

// G is a shorthand for [GetLogger].
//
// We may want to define this locally to a package to get package tagged log
// messages.
var G = GetLogger

// L is an alias for the standard logger.
var L = &Entry{
Logger: logrus.StandardLogger(),
// Default is three fields plus a little extra room.
Data: make(Fields, 6),
}

type loggerKey struct{}

// Fields type to pass to "WithFields".
type Fields = map[string]any

// Entry is a logging entry. It contains all the fields passed with
// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn,
// Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
//
// Entry is a transitional type, and currently an alias for [logrus.Entry].
type Entry = logrus.Entry

// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
// zeros to ensure the formatted time is always the same number of
// characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

// Level is a logging level.
type Level = logrus.Level

// Supported log levels.
const (
// TraceLevel level. Designates finer-grained informational events
// than [DebugLevel].
TraceLevel Level = logrus.TraceLevel

// DebugLevel level. Usually only enabled when debugging. Very verbose
// logging.
DebugLevel Level = logrus.DebugLevel

// InfoLevel level. General operational entries about what's going on
// inside the application.
InfoLevel Level = logrus.InfoLevel

// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel Level = logrus.WarnLevel

// ErrorLevel level. Logs errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel Level = logrus.ErrorLevel

// FatalLevel level. Logs and then calls "logger.Exit(1)". It exits
// even if the logging level is set to Panic.
FatalLevel Level = logrus.FatalLevel

// PanicLevel level. This is the highest level of severity. Logs and
// then calls panic with the message passed to Debug, Info, ...
PanicLevel Level = logrus.PanicLevel
)

// SetLevel sets log level globally. It returns an error if the given
// level is not supported.
//
// level can be one of:
//
// - "trace" ([TraceLevel])
// - "debug" ([DebugLevel])
// - "info" ([InfoLevel])
// - "warn" ([WarnLevel])
// - "error" ([ErrorLevel])
// - "fatal" ([FatalLevel])
// - "panic" ([PanicLevel])
func SetLevel(level string) error {
lvl, err := logrus.ParseLevel(level)
if err != nil {
return err
}

L.Logger.SetLevel(lvl)
return nil
}

// GetLevel returns the current log level.
func GetLevel() Level {
return L.Logger.GetLevel()
}

// OutputFormat specifies a log output format.
type OutputFormat string

// Supported log output formats.
const (
// TextFormat represents the text logging format.
TextFormat OutputFormat = "text"

// JSONFormat represents the JSON logging format.
JSONFormat OutputFormat = "json"
)

// SetFormat sets the log output format ([TextFormat] or [JSONFormat]).
func SetFormat(format OutputFormat) error {
switch format {
case TextFormat:
L.Logger.SetFormatter(&logrus.TextFormatter{
TimestampFormat: RFC3339NanoFixed,
FullTimestamp: true,
})
return nil
case JSONFormat:
L.Logger.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: RFC3339NanoFixed,
})
return nil
default:
return fmt.Errorf("unknown log format: %s", format)
}
}

// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *Entry) context.Context {
return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx))
}

// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *Entry {
if logger := ctx.Value(loggerKey{}); logger != nil {
return logger.(*Entry)
}
return L.WithContext(ctx)
}
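A small usage sketch (illustrative only, not part of the vendored file above) showing the context-aware logger this package provides:

package main

import (
	"context"

	"github.com/containerd/log"
)

func main() {
	// Configure the process-wide logger defined above.
	if err := log.SetFormat(log.JSONFormat); err != nil {
		panic(err)
	}
	if err := log.SetLevel("debug"); err != nil {
		panic(err)
	}

	// Attach a field-scoped logger to a context and retrieve it later with G.
	ctx := log.WithLogger(context.Background(), log.L.WithField("module", "example"))
	log.G(ctx).WithField("attempt", 1).Debug("doing work")
}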
@ -1,149 +0,0 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
// G is a shorthand for [GetLogger].
|
||||
//
|
||||
// Deprecated: use [log.G].
|
||||
var G = log.G
|
||||
|
||||
// L is an alias for the standard logger.
|
||||
//
|
||||
// Deprecated: use [log.L].
|
||||
var L = log.L
|
||||
|
||||
// Fields type to pass to "WithFields".
|
||||
//
|
||||
// Deprecated: use [log.Fields].
|
||||
type Fields = log.Fields
|
||||
|
||||
// Entry is a logging entry.
|
||||
//
|
||||
// Deprecated: use [log.Entry].
|
||||
type Entry = log.Entry
|
||||
|
||||
// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
|
||||
// zeros to ensure the formatted time is always the same number of
|
||||
// characters.
|
||||
//
|
||||
// Deprecated: use [log.RFC3339NanoFixed].
|
||||
const RFC3339NanoFixed = log.RFC3339NanoFixed
|
||||
|
||||
// Level is a logging level.
|
||||
//
|
||||
// Deprecated: use [log.Level].
|
||||
type Level = log.Level
|
||||
|
||||
// Supported log levels.
|
||||
const (
|
||||
// TraceLevel level.
|
||||
//
|
||||
// Deprecated: use [log.TraceLevel].
|
||||
TraceLevel Level = log.TraceLevel
|
||||
|
||||
// DebugLevel level.
|
||||
//
|
||||
// Deprecated: use [log.DebugLevel].
|
||||
DebugLevel Level = log.DebugLevel
|
||||
|
||||
// InfoLevel level.
|
||||
//
|
||||
// Deprecated: use [log.InfoLevel].
|
||||
InfoLevel Level = log.InfoLevel
|
||||
|
||||
// WarnLevel level.
|
||||
//
|
||||
// Deprecated: use [log.WarnLevel].
|
||||
WarnLevel Level = log.WarnLevel
|
||||
|
||||
// ErrorLevel level
|
||||
//
|
||||
// Deprecated: use [log.ErrorLevel].
|
||||
ErrorLevel Level = log.ErrorLevel
|
||||
|
||||
// FatalLevel level.
|
||||
//
|
||||
// Deprecated: use [log.FatalLevel].
|
||||
FatalLevel Level = log.FatalLevel
|
||||
|
||||
// PanicLevel level.
|
||||
//
|
||||
// Deprecated: use [log.PanicLevel].
|
||||
PanicLevel Level = log.PanicLevel
|
||||
)
|
||||
|
||||
// SetLevel sets log level globally. It returns an error if the given
|
||||
// level is not supported.
|
||||
//
|
||||
// Deprecated: use [log.SetLevel].
|
||||
func SetLevel(level string) error {
|
||||
return log.SetLevel(level)
|
||||
}
|
||||
|
||||
// GetLevel returns the current log level.
|
||||
//
|
||||
// Deprecated: use [log.GetLevel].
|
||||
func GetLevel() log.Level {
|
||||
return log.GetLevel()
|
||||
}
|
||||
|
||||
// OutputFormat specifies a log output format.
|
||||
//
|
||||
// Deprecated: use [log.OutputFormat].
|
||||
type OutputFormat = log.OutputFormat
|
||||
|
||||
// Supported log output formats.
|
||||
const (
|
||||
// TextFormat represents the text logging format.
|
||||
//
|
||||
// Deprecated: use [log.TextFormat].
|
||||
TextFormat log.OutputFormat = "text"
|
||||
|
||||
// JSONFormat represents the JSON logging format.
|
||||
//
|
||||
// Deprecated: use [log.JSONFormat].
|
||||
JSONFormat log.OutputFormat = "json"
|
||||
)
|
||||
|
||||
// SetFormat sets the log output format.
|
||||
//
|
||||
// Deprecated: use [log.SetFormat].
|
||||
func SetFormat(format OutputFormat) error {
|
||||
return log.SetFormat(format)
|
||||
}
|
||||
|
||||
// WithLogger returns a new context with the provided logger. Use in
|
||||
// combination with logger.WithField(s) for great effect.
|
||||
//
|
||||
// Deprecated: use [log.WithLogger].
|
||||
func WithLogger(ctx context.Context, logger *log.Entry) context.Context {
|
||||
return log.WithLogger(ctx, logger)
|
||||
}
|
||||
|
||||
// GetLogger retrieves the current logger from the context. If no logger is
|
||||
// available, the default logger is returned.
|
||||
//
|
||||
// Deprecated: use [log.GetLogger].
|
||||
func GetLogger(ctx context.Context) *log.Entry {
|
||||
return log.GetLogger(ctx)
|
||||
}
|
||||
|
|
@@ -20,9 +20,9 @@ import (
"errors"
"fmt"
"os"
"os/exec"
"time"

exec "golang.org/x/sys/execabs"
"golang.org/x/sys/unix"
)

@@ -20,12 +20,12 @@ import (
"errors"
"fmt"
"os"
"os/exec"
"path"
"runtime"
"strings"
"time"

exec "golang.org/x/sys/execabs"
"golang.org/x/sys/unix"
)

@@ -18,8 +18,6 @@ package oci

import (
"context"
"encoding/json"
"os"
"path/filepath"
"runtime"

@@ -45,22 +43,6 @@ var (
// to be created without the "issues" with go vendoring and package imports
type Spec = specs.Spec

const ConfigFilename = "config.json"

// ReadSpec deserializes JSON into an OCI runtime Spec from a given path.
func ReadSpec(path string) (*Spec, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
var s Spec
if err := json.NewDecoder(f).Decode(&s); err != nil {
return nil, err
}
return &s, nil
}

// GenerateSpec will generate a default spec from the provided image
// for use as a containerd container
func GenerateSpec(ctx context.Context, client Client, c *containers.Container, opts ...SpecOpts) (*Spec, error) {

@@ -193,7 +175,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error {
"/proc/timer_stats",
"/proc/sched_debug",
"/sys/firmware",
"/sys/devices/virtual/powercap",
"/proc/scsi",
},
ReadonlyPaths: []string{
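For illustration only (not part of the diff): the ReadSpec helper in the hunk above is essentially a JSON decode of config.json into the runtime-spec type. A standalone sketch using only the upstream specs-go package; the file path is an assumption.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// readSpec mirrors the ReadSpec shown above: open the file and decode it
// into an OCI runtime Spec.
func readSpec(path string) (*specs.Spec, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var s specs.Spec
	if err := json.NewDecoder(f).Decode(&s); err != nil {
		return nil, err
	}
	return &s, nil
}

func main() {
	s, err := readSpec("config.json") // path is an assumption
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("OCI spec version:", s.Version)
}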
|
|||
|
|
@ -35,8 +35,8 @@ import (
|
|||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/moby/sys/user"
|
||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
|
|
@ -185,6 +185,13 @@ func WithEnv(environmentVariables []string) SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// WithDefaultPathEnv sets the $PATH environment variable to the
|
||||
// default PATH defined in this package.
|
||||
func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
|
||||
s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv)
|
||||
return nil
|
||||
}
|
||||
|
||||
// replaceOrAppendEnvValues returns the defaults with the overrides either
|
||||
// replaced by env key or appended to the list
|
||||
func replaceOrAppendEnvValues(defaults, overrides []string) []string {
|
||||
|
|
@ -893,9 +900,9 @@ func WithAppendAdditionalGroups(groups ...string) SpecOpts {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ugroups, groupErr := user.ParseGroupFile(gpath)
|
||||
if groupErr != nil && !os.IsNotExist(groupErr) {
|
||||
return groupErr
|
||||
ugroups, err := user.ParseGroupFile(gpath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
groupMap := make(map[string]user.Group)
|
||||
for _, group := range ugroups {
|
||||
|
|
@ -909,9 +916,6 @@ func WithAppendAdditionalGroups(groups ...string) SpecOpts {
|
|||
} else {
|
||||
g, ok := groupMap[group]
|
||||
if !ok {
|
||||
if groupErr != nil {
|
||||
return fmt.Errorf("unable to find group %s: %w", group, groupErr)
|
||||
}
|
||||
return fmt.Errorf("unable to find group %s", group)
|
||||
}
|
||||
gids = append(gids, uint32(g.Gid))
|
||||
|
|
|
|||
|
|
@ -1,32 +0,0 @@
|
|||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package oci
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
)
|
||||
|
||||
// WithDefaultPathEnv sets the $PATH environment variable to the
|
||||
// default PATH defined in this package.
|
||||
func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
|
||||
s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv)
|
||||
return nil
|
||||
}
|
||||
|
|
@ -62,8 +62,3 @@ func WithDevices(devicePath, containerPath, permissions string) SpecOpts {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Windows containers have default path configured at bootup
|
||||
func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,97 +0,0 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package deprecation
|
||||
|
||||
type Warning string
|
||||
|
||||
const (
|
||||
// Prefix is a standard prefix for all Warnings, used for filtering plugin Exports
|
||||
Prefix = "io.containerd.deprecation/"
|
||||
// PullSchema1Image is a warning for the use of schema 1 images
|
||||
PullSchema1Image Warning = Prefix + "pull-schema-1-image"
|
||||
// GoPluginLibrary is a warning for the use of dynamic library Go plugins
|
||||
GoPluginLibrary Warning = Prefix + "go-plugin-library"
|
||||
// CRISystemdCgroupV1 is a warning for the `systemd_cgroup` property
|
||||
CRISystemdCgroupV1 Warning = Prefix + "cri-systemd-cgroup-v1"
|
||||
// CRIUntrustedWorkloadRuntime is a warning for the `untrusted_workload_runtime` property
|
||||
CRIUntrustedWorkloadRuntime Warning = Prefix + "cri-untrusted-workload-runtime"
|
||||
// CRIDefaultRuntime is a warning for the `default_runtime` property
|
||||
CRIDefaultRuntime Warning = Prefix + "cri-default-runtime"
|
||||
// CRIRuntimeEngine is a warning for the `runtime_engine` property
|
||||
CRIRuntimeEngine Warning = Prefix + "cri-runtime-engine"
|
||||
// CRIRuntimeRoot is a warning for the `runtime_root` property
|
||||
CRIRuntimeRoot Warning = Prefix + "cri-runtime-root"
|
||||
// CRIRegistryMirrors is a warning for the use of the `mirrors` property
|
||||
CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors"
|
||||
// CRIRegistryAuths is a warning for the use of the `auths` property
|
||||
CRIRegistryAuths Warning = Prefix + "cri-registry-auths"
|
||||
// CRIRegistryConfigs is a warning for the use of the `configs` property
|
||||
CRIRegistryConfigs Warning = Prefix + "cri-registry-configs"
|
||||
// CRIAPIV1Alpha2 is a warning for the use of CRI-API v1alpha2
|
||||
CRIAPIV1Alpha2 Warning = Prefix + "cri-api-v1alpha2"
|
||||
// AUFSSnapshotter is a warning for the use of the aufs snapshotter
|
||||
AUFSSnapshotter Warning = Prefix + "aufs-snapshotter"
|
||||
// RestartLogpath is a warning for the containerd.io/restart.logpath label
|
||||
RestartLogpath Warning = Prefix + "restart-logpath"
|
||||
// RuntimeV1 is a warning for the io.containerd.runtime.v1.linux runtime
|
||||
RuntimeV1 Warning = Prefix + "runtime-v1"
|
||||
// RuntimeRuncV1 is a warning for the io.containerd.runc.v1 runtime
|
||||
RuntimeRuncV1 Warning = Prefix + "runtime-runc-v1"
|
||||
// CRICRIUPath is a warning for the use of the `CriuPath` property
|
||||
CRICRIUPath Warning = Prefix + "cri-criu-path"
|
||||
)
|
||||
|
||||
var messages = map[Warning]string{
|
||||
PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7 and removed in containerd v2.0. " +
|
||||
`Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`,
|
||||
GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes will be deprecated in containerd v2.0 and removed in containerd v2.1.",
|
||||
CRISystemdCgroupV1: "The `systemd_cgroup` property (old form) of `[plugins.\"io.containerd.grpc.v1.cri\"] is deprecated since containerd v1.3 and will be removed in containerd v2.0. " +
|
||||
"Use `SystemdCgroup` in [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.runc.options] options instead.",
|
||||
CRIUntrustedWorkloadRuntime: "The `untrusted_workload_runtime` property of [plugins.\"io.containerd.grpc.v1.cri\".containerd] is deprecated since containerd v1.2 and will be removed in containerd v2.0. " +
|
||||
"Create an `untrusted` runtime in `runtimes` instead.",
|
||||
CRIDefaultRuntime: "The `default_runtime` property of [plugins.\"io.containerd.grpc.v1.cri\".containerd] is deprecated since containerd v1.3 and will be removed in containerd v2.0. " +
|
||||
"Use `default_runtime_name` instead.",
|
||||
CRIRuntimeEngine: "The `runtime_engine` property of [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.*] is deprecated since containerd v1.3 and will be removed in containerd v2.0. " +
|
||||
"Use a v2 runtime and `options` instead.",
|
||||
CRIRuntimeRoot: "The `runtime_root` property of [plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.*] is deprecated since containerd v1.3 and will be removed in containerd v2.0. " +
|
||||
"Use a v2 runtime and `options.Root` instead.",
|
||||
CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.0. " +
|
||||
"Use `config_path` instead.",
|
||||
CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.0. " +
|
||||
"Use `ImagePullSecrets` instead.",
|
||||
CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.0. " +
|
||||
"Use `config_path` instead.",
|
||||
CRIAPIV1Alpha2: "CRI API v1alpha2 is deprecated since containerd v1.7 and removed in containerd v2.0. Use CRI API v1 instead.",
|
||||
AUFSSnapshotter: "The aufs snapshotter is deprecated since containerd v1.5 and removed in containerd v2.0. Use the overlay snapshotter instead.",
|
||||
RestartLogpath: "The `containerd.io/restart.logpath` label is deprecated since containerd v1.5 and removed in containerd v2.0. Use `containerd.io/restart.loguri` instead.",
|
||||
RuntimeV1: "The `io.containerd.runtime.v1.linux` runtime is deprecated since containerd v1.4 and removed in containerd v2.0. Use the `io.containerd.runc.v2` runtime instead.",
|
||||
RuntimeRuncV1: "The `io.containerd.runc.v1` runtime is deprecated since containerd v1.4 and removed in containerd v2.0. Use the `io.containerd.runc.v2` runtime instead.",
|
||||
CRICRIUPath: "The `CriuPath` property of `[plugins.\"io.containerd.grpc.v1.cri\".containerd.runtimes.*.options]` is deprecated since containerd v1.7 and will be removed in containerd v2.0. " +
|
||||
"Use a criu binary in $PATH instead.",
|
||||
}
|
||||
|
||||
// Valid checks whether a given Warning is valid
|
||||
func Valid(id Warning) bool {
|
||||
_, ok := messages[id]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Message returns the human-readable message for a given Warning
|
||||
func Message(id Warning) (string, bool) {
|
||||
msg, ok := messages[id]
|
||||
return msg, ok
|
||||
}
|
||||
|
|
@@ -17,11 +17,8 @@
package dialer

import (
"fmt"
"net"
"os"
"path/filepath"
"strings"
"time"

winio "github.com/Microsoft/go-winio"

@@ -32,16 +29,10 @@ func isNoent(err error) bool {
}

func dialer(address string, timeout time.Duration) (net.Conn, error) {
address = strings.TrimPrefix(filepath.ToSlash(address), "npipe://")
return winio.DialPipe(address, &timeout)
}

// DialAddress returns the dial address with npipe:// prepended to the
// provided address
// DialAddress returns the dial address
func DialAddress(address string) string {
address = filepath.ToSlash(address)
if !strings.HasPrefix(address, "npipe://") {
address = fmt.Sprintf("npipe://%s", address)
}
return address
}
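Illustrative only (not part of the diff): a self-contained sketch of the npipe address normalization that DialAddress above performs; the pipe name is an assumption.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// dialAddress mirrors DialAddress above: normalize separators and make sure
// the npipe:// scheme is present exactly once.
func dialAddress(address string) string {
	address = filepath.ToSlash(address)
	if !strings.HasPrefix(address, "npipe://") {
		address = fmt.Sprintf("npipe://%s", address)
	}
	return address
}

func main() {
	// On Windows, backslashes are converted, so this prints
	// npipe:////./pipe/containerd-containerd.
	fmt.Println(dialAddress(`\\.\pipe\containerd-containerd`))
}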
|
|
|
|||
|
|
@ -236,7 +236,6 @@ func (u *Unpacker) unpack(
|
|||
ctx := u.ctx
|
||||
ctx, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpack"))
|
||||
defer layerSpan.End()
|
||||
unpackStart := time.Now()
|
||||
p, err := content.ReadBlob(ctx, u.content, config)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -413,7 +412,6 @@ func (u *Unpacker) unpack(
|
|||
|
||||
for i, desc := range layers {
|
||||
_, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpackLayer"))
|
||||
unpackLayerStart := time.Now()
|
||||
layerSpan.SetAttributes(
|
||||
tracing.Attribute("layer.media.type", desc.MediaType),
|
||||
tracing.Attribute("layer.media.size", desc.Size),
|
||||
|
|
@ -425,10 +423,6 @@ func (u *Unpacker) unpack(
|
|||
return err
|
||||
}
|
||||
layerSpan.End()
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"layer": desc.Digest,
|
||||
"duration": time.Since(unpackLayerStart),
|
||||
}).Debug("layer unpacked")
|
||||
}
|
||||
|
||||
chainID := identity.ChainID(chain).String()
|
||||
|
|
@ -443,9 +437,8 @@ func (u *Unpacker) unpack(
|
|||
return err
|
||||
}
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
"config": config.Digest,
|
||||
"chainID": chainID,
|
||||
"duration": time.Since(unpackStart),
|
||||
"config": config.Digest,
|
||||
"chainID": chainID,
|
||||
}).Debug("image unpacked")
|
||||
|
||||
return nil
|
||||
|
|
|
|||
|
|
@@ -21,10 +21,9 @@ import (
"fmt"
"path/filepath"

ocispec "github.com/opencontainers/image-spec/specs-go/v1"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/events/exchange"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// InitContext is used for plugin initialization

@@ -134,19 +133,6 @@ func (ps *Set) Get(t Type) (interface{}, error) {
return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
}

// GetByID returns the plugin of the given type and ID
func (ps *Set) GetByID(t Type, id string) (*Plugin, error) {
typSet, ok := ps.byTypeAndID[t]
if !ok || len(typSet) == 0 {
return nil, fmt.Errorf("no plugins registered for %s: %w", t, errdefs.ErrNotFound)
}
p, ok := typSet[id]
if !ok {
return nil, fmt.Errorf("no plugins registered for %s %q: %w", t, id, errdefs.ErrNotFound)
}
return p, nil
}

// GetAll returns all initialized plugins
func (ps *Set) GetAll() []*Plugin {
return ps.ordered
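A sketch (not part of the diff) of how a plugin could resolve a dependency with the GetByID helper above, following the registration pattern that appears later in this diff; the plugin type, ID, and returned instance here are assumptions.

package main

import "github.com/containerd/containerd/plugin"

func init() {
	plugin.Register(&plugin.Registration{
		Type:     plugin.ServicePlugin,
		ID:       "example",
		Requires: []plugin.Type{plugin.WarningPlugin},
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			// Look up a single plugin by type and ID instead of iterating
			// over everything registered for the type.
			p, err := ic.Plugins().GetByID(plugin.WarningPlugin, plugin.DeprecationsPlugin)
			if err != nil {
				return nil, err
			}
			instance, err := p.Instance()
			if err != nil {
				return nil, err
			}
			_ = instance // use the resolved service here
			return &struct{}{}, nil
		},
	})
}

func main() {}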
@@ -90,8 +90,6 @@ const (
SandboxStorePlugin Type = "io.containerd.sandbox.store.v1"
// SandboxControllerPlugin implements a sandbox controller
SandboxControllerPlugin Type = "io.containerd.sandbox.controller.v1"
// WarningPlugin implements a warning service
WarningPlugin Type = "io.containerd.warning.v1"
)

const (

@@ -100,8 +98,7 @@ const (
// RuntimeRuncV1 is the runc runtime that supports a single container
RuntimeRuncV1 = "io.containerd.runc.v1"
// RuntimeRuncV2 is the runc runtime that supports multiple containers per shim
RuntimeRuncV2 = "io.containerd.runc.v2"
DeprecationsPlugin = "deprecations"
RuntimeRuncV2 = "io.containerd.runc.v2"
)

// Registration contains information for registering a plugin

@@ -146,7 +143,7 @@ var register = struct {
}{}

// Load loads all plugins at the provided path into containerd
func Load(path string) (count int, err error) {
func Load(path string) (err error) {
defer func() {
if v := recover(); v != nil {
rerr, ok := v.(error)

@@ -25,13 +25,12 @@ import (
"runtime"
)

// loadPlugins loads all plugins for the OS and Arch that containerd is built
// for inside the provided path and returns the count of successfully-loaded
// plugins
func loadPlugins(path string) (int, error) {
// loadPlugins loads all plugins for the OS and Arch
// that containerd is built for inside the provided path
func loadPlugins(path string) error {
abs, err := filepath.Abs(path)
if err != nil {
return 0, err
return err
}
pattern := filepath.Join(abs, fmt.Sprintf(
"*-%s-%s.%s",

@@ -41,16 +40,14 @@ func loadPlugins(path string) (int, error) {
))
libs, err := filepath.Glob(pattern)
if err != nil {
return 0, err
return err
}
loaded := 0
for _, lib := range libs {
if _, err := plugin.Open(lib); err != nil {
return loaded, err
return err
}
loaded++
}
return loaded, nil
return nil
}

// getLibExt returns a platform specific lib extension for

@@ -18,7 +18,7 @@

package plugin

func loadPlugins(path string) (int, error) {
func loadPlugins(path string) error {
// plugins not supported until 1.8
return 0, nil
return nil
}
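Illustrative only (not part of the diff): how a caller might use the Load variant above that reports how many dynamic Go plugins were opened; the plugin directory path is an assumption.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/plugin"
)

func main() {
	count, err := plugin.Load("/var/lib/containerd/plugins")
	if err != nil {
		log.Fatalf("loading plugins: %v", err)
	}
	fmt.Printf("loaded %d dynamic plugins\n", count)
}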
|
|
|
|||
|
|
@ -21,9 +21,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/semaphore"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/pkg/unpack"
|
||||
|
|
@ -32,6 +29,8 @@ import (
|
|||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
"github.com/containerd/containerd/tracing"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -190,10 +189,9 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
var (
|
||||
handler images.Handler
|
||||
|
||||
isConvertible bool
|
||||
originalSchema1Digest string
|
||||
converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error)
|
||||
limiter *semaphore.Weighted
|
||||
isConvertible bool
|
||||
converterFunc func(context.Context, ocispec.Descriptor) (ocispec.Descriptor, error)
|
||||
limiter *semaphore.Weighted
|
||||
)
|
||||
|
||||
if desc.MediaType == images.MediaTypeDockerSchema1Manifest && rCtx.ConvertSchema1 {
|
||||
|
|
@ -206,8 +204,6 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
converterFunc = func(ctx context.Context, _ ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||
return schema1Converter.Convert(ctx)
|
||||
}
|
||||
|
||||
originalSchema1Digest = desc.Digest.String()
|
||||
} else {
|
||||
// Get all the children for a descriptor
|
||||
childrenHandler := images.ChildrenHandler(store)
|
||||
|
|
@ -274,13 +270,6 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
|
|||
}
|
||||
}
|
||||
|
||||
if originalSchema1Digest != "" {
|
||||
if rCtx.Labels == nil {
|
||||
rCtx.Labels = make(map[string]string)
|
||||
}
|
||||
rCtx.Labels[images.ConvertedDockerSchema1LabelKey] = originalSchema1Digest
|
||||
}
|
||||
|
||||
return images.Image{
|
||||
Name: name,
|
||||
Target: desc,
|
||||
|
|
|
|||
|
|
@@ -186,15 +186,15 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
return err
}

if username == "" || secret == "" {
return fmt.Errorf("%w: no basic auth credentials", ErrInvalidAuthorization)
}
if username != "" && secret != "" {
common := auth.TokenOptions{
Username: username,
Secret: secret,
}

a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, auth.TokenOptions{
Username: username,
Secret: secret,
})
return nil
a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common)
return nil
}
}
}
return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented)
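For context (not part of the diff): the username/secret pair consumed by AddResponses above usually comes from an authorizer configured on the resolver. A minimal sketch where the registry host and credentials are placeholders.

package main

import (
	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	authorizer := docker.NewDockerAuthorizer(
		// Credentials returned here are what AddResponses turns into
		// auth.TokenOptions for basic or bearer token auth.
		docker.WithAuthCreds(func(host string) (string, string, error) {
			if host == "registry.example.com" {
				return "user", "pass", nil // placeholders
			}
			return "", "", nil
		}),
	)

	resolver := docker.NewResolver(docker.ResolverOptions{
		Hosts: docker.ConfigureDefaultRegistries(docker.WithAuthorizer(authorizer)),
	})
	_ = resolver // hand to the client, e.g. containerd.WithResolver(resolver)
}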
@@ -76,16 +76,6 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
if _, err2 := hrs.reader(); err2 == nil {
return n, nil
}
} else if err == io.EOF {
// The CRI's imagePullProgressTimeout relies on responseBody.Close to
// update the process monitor's status. If the err is io.EOF, close
// the connection since there is no more available data.
if hrs.rc != nil {
if clsErr := hrs.rc.Close(); clsErr != nil {
log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser after io.EOF")
}
hrs.rc = nil
}
}
return
}
|
|
|||
|
|
@ -23,7 +23,6 @@ import (
|
|||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
|
@ -138,9 +137,6 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
|||
if exists {
|
||||
p.tracker.SetStatus(ref, Status{
|
||||
Committed: true,
|
||||
PushStatus: PushStatus{
|
||||
Exists: true,
|
||||
},
|
||||
Status: content.Status{
|
||||
Ref: ref,
|
||||
Total: desc.Size,
|
||||
|
|
@ -168,7 +164,6 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
|||
// Start upload request
|
||||
req = p.request(host, http.MethodPost, "blobs", "uploads/")
|
||||
|
||||
mountedFrom := ""
|
||||
var resp *http.Response
|
||||
if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
|
||||
preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
|
||||
|
|
@ -185,14 +180,11 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
|||
return nil, err
|
||||
}
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusUnauthorized:
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
|
||||
|
||||
resp.Body.Close()
|
||||
resp = nil
|
||||
case http.StatusCreated:
|
||||
mountedFrom = path.Join(p.refspec.Hostname(), fromRepo)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -212,9 +204,6 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
|||
case http.StatusCreated:
|
||||
p.tracker.SetStatus(ref, Status{
|
||||
Committed: true,
|
||||
PushStatus: PushStatus{
|
||||
MountedFrom: mountedFrom,
|
||||
},
|
||||
Status: content.Status{
|
||||
Ref: ref,
|
||||
Total: desc.Size,
|
||||
|
|
@ -249,16 +238,13 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
|||
}
|
||||
|
||||
if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
|
||||
|
||||
lhost.Scheme = lurl.Scheme
|
||||
lhost.Host = lurl.Host
|
||||
log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination")
|
||||
|
||||
// Check if different than what was requested, accounting for fallback in the transport layer
|
||||
requested := resp.Request.URL
|
||||
if requested.Host != lhost.Host || requested.Scheme != lhost.Scheme {
|
||||
// Strip authorizer if change to host or scheme
|
||||
lhost.Authorizer = nil
|
||||
log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination, authorizer removed")
|
||||
}
|
||||
// Strip authorizer if change to host or scheme
|
||||
lhost.Authorizer = nil
|
||||
}
|
||||
}
|
||||
q := lurl.Query()
|
||||
|
|
|
|||
|
|
@ -18,7 +18,6 @@ package docker
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
|
@ -585,13 +584,18 @@ func (r *request) do(ctx context.Context) (*http.Response, error) {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "HTTPRequest"))
|
||||
|
||||
_, httpSpan := tracing.StartSpan(
|
||||
ctx,
|
||||
tracing.Name("remotes.docker.resolver", "HTTPRequest"),
|
||||
tracing.WithHTTPRequest(req),
|
||||
)
|
||||
defer httpSpan.End()
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
httpSpan.SetStatus(err)
|
||||
return nil, fmt.Errorf("failed to do request: %w", err)
|
||||
}
|
||||
httpSpan.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...)
|
||||
log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
|
||||
return resp, nil
|
||||
}
|
||||
|
|
@ -703,27 +707,3 @@ func IsLocalhost(host string) bool {
|
|||
ip := net.ParseIP(host)
|
||||
return ip.IsLoopback()
|
||||
}
|
||||
|
||||
// HTTPFallback is an http.RoundTripper which allows fallback from https to http
|
||||
// for registry endpoints with configurations for both http and TLS, such as
|
||||
// defaulted localhost endpoints.
|
||||
type HTTPFallback struct {
|
||||
http.RoundTripper
|
||||
}
|
||||
|
||||
func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
resp, err := f.RoundTripper.RoundTrip(r)
|
||||
var tlsErr tls.RecordHeaderError
|
||||
if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" {
|
||||
// server gave HTTP response to HTTPS client
|
||||
plainHTTPUrl := *r.URL
|
||||
plainHTTPUrl.Scheme = "http"
|
||||
|
||||
plainHTTPRequest := *r
|
||||
plainHTTPRequest.URL = &plainHTTPUrl
|
||||
|
||||
return f.RoundTripper.RoundTrip(&plainHTTPRequest)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -36,17 +36,6 @@ type Status struct {

// UploadUUID is used by the Docker registry to reference blob uploads
UploadUUID string

// PushStatus contains status related to push.
PushStatus
}

type PushStatus struct {
// MountedFrom is the source content was cross-repo mounted from (empty if no cross-repo mount was performed).
MountedFrom string

// Exists indicates whether content already exists in the repository and wasn't uploaded.
Exists bool
}

// StatusTracker to track status of operations
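Illustrative only (not part of the diff): reading the PushStatus fields shown above from a status tracker attached to the resolver; the ref value is a placeholder.

package main

import (
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	tracker := docker.NewInMemoryTracker()
	resolver := docker.NewResolver(docker.ResolverOptions{Tracker: tracker})
	_ = resolver // use the resolver for a push via the client or remotes package

	// After a push, the tracker reports whether the blob already existed in
	// the repository or was cross-repo mounted instead of uploaded.
	if status, err := tracker.GetStatus("example-push-ref"); err == nil {
		fmt.Println("exists:", status.Exists, "mounted from:", status.MountedFrom)
	}
}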
|
|
|||
|
|
@ -361,15 +361,8 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.In
|
|||
return children, nil
|
||||
}
|
||||
|
||||
parentSourceAnnotations := desc.Annotations
|
||||
var parentLabels map[string]string
|
||||
if pi, err := provider.Info(ctx, desc.Digest); err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
parentLabels = pi.Labels
|
||||
}
|
||||
// parentInfo can be used to inherit info for non-existent blobs
|
||||
var parentInfo *content.Info
|
||||
|
||||
for i := range children {
|
||||
child := children[i]
|
||||
|
|
@ -379,35 +372,32 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.In
|
|||
if !errdefs.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
if parentInfo == nil {
|
||||
pi, err := provider.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentInfo = &pi
|
||||
}
|
||||
// Blob may not exist locally, annotate with parent labels for cross repo
|
||||
// mount or fetch. Parent sources may apply to all children since most
|
||||
// registries enforce that children exist before the manifests.
|
||||
info = *parentInfo
|
||||
}
|
||||
copyDistributionSourceLabels(info.Labels, &child)
|
||||
|
||||
// Annotate with parent labels for cross repo mount or fetch.
|
||||
// Parent sources may apply to all children since most registries
|
||||
// enforce that children exist before the manifests.
|
||||
copyDistributionSourceLabels(parentSourceAnnotations, &child)
|
||||
copyDistributionSourceLabels(parentLabels, &child)
|
||||
for k, v := range info.Labels {
|
||||
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
|
||||
continue
|
||||
}
|
||||
|
||||
if child.Annotations == nil {
|
||||
child.Annotations = map[string]string{}
|
||||
}
|
||||
child.Annotations[k] = v
|
||||
}
|
||||
|
||||
children[i] = child
|
||||
}
|
||||
return children, nil
|
||||
}
|
||||
}
|
||||
|
||||
func copyDistributionSourceLabels(from map[string]string, to *ocispec.Descriptor) {
|
||||
for k, v := range from {
|
||||
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
|
||||
continue
|
||||
}
|
||||
|
||||
if to.Annotations == nil {
|
||||
to.Annotations = make(map[string]string)
|
||||
} else {
|
||||
// Only propagate the parent label if the child doesn't already have it.
|
||||
if _, has := to.Annotations[k]; has {
|
||||
continue
|
||||
}
|
||||
}
|
||||
to.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,59 +18,35 @@ package introspection
|
|||
|
||||
import (
|
||||
context "context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"google.golang.org/genproto/googleapis/rpc/code"
|
||||
rpc "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
api "github.com/containerd/containerd/api/services/introspection/v1"
|
||||
"github.com/containerd/containerd/api/types"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
"github.com/containerd/containerd/protobuf"
|
||||
ptypes "github.com/containerd/containerd/protobuf/types"
|
||||
"github.com/containerd/containerd/services"
|
||||
"github.com/containerd/containerd/services/warning"
|
||||
"github.com/google/uuid"
|
||||
"google.golang.org/genproto/googleapis/rpc/code"
|
||||
rpc "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.ServicePlugin,
|
||||
ID: services.IntrospectionService,
|
||||
Requires: []plugin.Type{plugin.WarningPlugin},
|
||||
Requires: []plugin.Type{},
|
||||
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
sps, err := ic.GetByType(plugin.WarningPlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p, ok := sps[plugin.DeprecationsPlugin]
|
||||
if !ok {
|
||||
return nil, errors.New("warning service not found")
|
||||
}
|
||||
|
||||
i, err := p.Instance()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
warningClient, ok := i.(warning.Service)
|
||||
if !ok {
|
||||
return nil, errors.New("could not create a local client for warning service")
|
||||
}
|
||||
|
||||
// this service fetches all plugins through the plugin set of the plugin context
|
||||
return &Local{
|
||||
plugins: ic.Plugins(),
|
||||
root: ic.Root,
|
||||
warningClient: warningClient,
|
||||
plugins: ic.Plugins(),
|
||||
root: ic.Root,
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
|
|
@ -78,11 +54,10 @@ func init() {
|
|||
|
||||
// Local is a local implementation of the introspection service
|
||||
type Local struct {
|
||||
mu sync.Mutex
|
||||
root string
|
||||
plugins *plugin.Set
|
||||
pluginCache []*api.Plugin
|
||||
warningClient warning.Service
|
||||
mu sync.Mutex
|
||||
root string
|
||||
plugins *plugin.Set
|
||||
pluginCache []*api.Plugin
|
||||
}
|
||||
|
||||
var _ = (api.IntrospectionClient)(&Local{})
|
||||
|
|
@ -140,10 +115,9 @@ func (l *Local) Server(ctx context.Context, _ *ptypes.Empty, _ ...grpc.CallOptio
|
|||
}
|
||||
}
|
||||
return &api.ServerResponse{
|
||||
UUID: u,
|
||||
Pid: uint64(pid),
|
||||
Pidns: pidns,
|
||||
Deprecations: l.getWarnings(ctx),
|
||||
UUID: u,
|
||||
Pid: uint64(pid),
|
||||
Pidns: pidns,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -185,10 +159,6 @@ func (l *Local) uuidPath() string {
|
|||
return filepath.Join(l.root, "uuid")
|
||||
}
|
||||
|
||||
func (l *Local) getWarnings(ctx context.Context) []*api.DeprecationWarning {
|
||||
return warningsPB(ctx, l.warningClient.Warnings())
|
||||
}
|
||||
|
||||
func adaptPlugin(o interface{}) filters.Adaptor {
|
||||
obj := o.(*api.Plugin)
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
|
|
@ -270,16 +240,3 @@ func pluginsToPB(plugins []*plugin.Plugin) []*api.Plugin {
|
|||
|
||||
return pluginsPB
|
||||
}
|
||||
|
||||
func warningsPB(ctx context.Context, warnings []warning.Warning) []*api.DeprecationWarning {
|
||||
var pb []*api.DeprecationWarning
|
||||
|
||||
for _, w := range warnings {
|
||||
pb = append(pb, &api.DeprecationWarning{
|
||||
ID: string(w.ID),
|
||||
Message: w.Message,
|
||||
LastOccurrence: protobuf.ToTimestamp(w.LastOccurrence),
|
||||
})
|
||||
}
|
||||
return pb
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,83 +0,0 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package warning
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/log"
|
||||
|
||||
deprecation "github.com/containerd/containerd/pkg/deprecation"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
)
|
||||
|
||||
type Service interface {
|
||||
Emit(context.Context, deprecation.Warning)
|
||||
Warnings() []Warning
|
||||
}
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.WarningPlugin,
|
||||
ID: plugin.DeprecationsPlugin,
|
||||
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
return &service{warnings: make(map[deprecation.Warning]time.Time)}, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type Warning struct {
|
||||
ID deprecation.Warning
|
||||
LastOccurrence time.Time
|
||||
Message string
|
||||
}
|
||||
|
||||
var _ Service = (*service)(nil)
|
||||
|
||||
type service struct {
|
||||
warnings map[deprecation.Warning]time.Time
|
||||
m sync.RWMutex
|
||||
}
|
||||
|
||||
func (s *service) Emit(ctx context.Context, warning deprecation.Warning) {
|
||||
if !deprecation.Valid(warning) {
|
||||
log.G(ctx).WithField("warningID", string(warning)).Warn("invalid deprecation warning")
|
||||
return
|
||||
}
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
s.warnings[warning] = time.Now()
|
||||
}
|
||||
func (s *service) Warnings() []Warning {
|
||||
s.m.RLock()
|
||||
defer s.m.RUnlock()
|
||||
var warnings []Warning
|
||||
for k, v := range s.warnings {
|
||||
msg, ok := deprecation.Message(k)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
warnings = append(warnings, Warning{
|
||||
ID: k,
|
||||
LastOccurrence: v,
|
||||
Message: msg,
|
||||
})
|
||||
}
|
||||
return warnings
|
||||
}
|
||||
|
|
@@ -20,11 +20,11 @@ import (
"context"
"net/http"

"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
httpconv "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
"go.opentelemetry.io/otel/trace"
)

@@ -37,27 +37,15 @@ type SpanOpt func(config *StartConfig)

// WithHTTPRequest marks span as a HTTP request operation from client to server.
// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type.
//
// Deprecated: use upstream functionality from otelhttp directly instead. This function is kept for API compatibility
// but no longer works as expected due to required functionality no longer exported in OpenTelemetry libraries.
func WithHTTPRequest(_ *http.Request) SpanOpt {
func WithHTTPRequest(request *http.Request) SpanOpt {
return func(config *StartConfig) {
config.spanOpts = append(config.spanOpts,
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
trace.WithAttributes(httpconv.ClientRequest(request)...), // Add HTTP attributes
)
}
}

// UpdateHTTPClient updates the http client with the necessary otel transport
func UpdateHTTPClient(client *http.Client, name string) {
client.Transport = otelhttp.NewTransport(
client.Transport,
otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string {
return name
}),
)
}

// StartSpan starts child span in a context.
func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) {
config := StartConfig{}

@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"

// Version holds the complete version number. Filled in at linking time.
Version = "1.7.12+unknown"
Version = "1.7.6+unknown"

// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.
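A small usage sketch (illustrative only, not part of the diff) combining the tracing helpers shown above, mirroring how the resolver change instruments its HTTP client; the span names and URL are placeholders.

package main

import (
	"context"
	"net/http"

	"github.com/containerd/containerd/tracing"
)

func main() {
	// Wrap the client's transport with the otel transport.
	client := &http.Client{}
	tracing.UpdateHTTPClient(client, tracing.Name("example", "HTTPRequest"))

	// Record a span around a single request.
	ctx, span := tracing.StartSpan(context.Background(), tracing.Name("example", "fetch"))
	defer span.End()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "http://localhost:5000/v2/", nil)
	if err != nil {
		span.SetStatus(err)
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		span.SetStatus(err)
		return
	}
	defer resp.Body.Close()
	span.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...)
}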
|
|
|||
|
|
@ -0,0 +1,191 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

Copyright 2014 Docker, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,17 @@
runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
157 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go generated vendored Normal file
@@ -0,0 +1,157 @@
//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
|
||||
// +build darwin dragonfly freebsd linux netbsd openbsd solaris
|
||||
|
||||
package user
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Unix-specific path to the passwd and group formatted files.
|
||||
const (
|
||||
unixPasswdPath = "/etc/passwd"
|
||||
unixGroupPath = "/etc/group"
|
||||
)
|
||||
|
||||
// LookupUser looks up a user by their username in /etc/passwd. If the user
|
||||
// cannot be found (or there is no /etc/passwd file on the filesystem), then
|
||||
// LookupUser returns an error.
|
||||
func LookupUser(username string) (User, error) {
|
||||
return lookupUserFunc(func(u User) bool {
|
||||
return u.Name == username
|
||||
})
|
||||
}
|
||||
|
||||
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
|
||||
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
|
||||
// returns an error.
|
||||
func LookupUid(uid int) (User, error) {
|
||||
return lookupUserFunc(func(u User) bool {
|
||||
return u.Uid == uid
|
||||
})
|
||||
}
|
||||
|
||||
func lookupUserFunc(filter func(u User) bool) (User, error) {
|
||||
// Get operating system-specific passwd reader-closer.
|
||||
passwd, err := GetPasswd()
|
||||
if err != nil {
|
||||
return User{}, err
|
||||
}
|
||||
defer passwd.Close()
|
||||
|
||||
// Get the users.
|
||||
users, err := ParsePasswdFilter(passwd, filter)
|
||||
if err != nil {
|
||||
return User{}, err
|
||||
}
|
||||
|
||||
// No user entries found.
|
||||
if len(users) == 0 {
|
||||
return User{}, ErrNoPasswdEntries
|
||||
}
|
||||
|
||||
// Assume the first entry is the "correct" one.
|
||||
return users[0], nil
|
||||
}
|
||||
|
||||
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
|
||||
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
|
||||
// returns an error.
|
||||
func LookupGroup(groupname string) (Group, error) {
|
||||
return lookupGroupFunc(func(g Group) bool {
|
||||
return g.Name == groupname
|
||||
})
|
||||
}
|
||||
|
||||
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
|
||||
// be found (or there is no /etc/group file on the filesystem), then LookupGid
|
||||
// returns an error.
|
||||
func LookupGid(gid int) (Group, error) {
|
||||
return lookupGroupFunc(func(g Group) bool {
|
||||
return g.Gid == gid
|
||||
})
|
||||
}
|
||||
|
||||
func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
|
||||
// Get operating system-specific group reader-closer.
|
||||
group, err := GetGroup()
|
||||
if err != nil {
|
||||
return Group{}, err
|
||||
}
|
||||
defer group.Close()
|
||||
|
||||
// Get the groups.
|
||||
groups, err := ParseGroupFilter(group, filter)
|
||||
if err != nil {
|
||||
return Group{}, err
|
||||
}
|
||||
|
||||
// No group entries found.
|
||||
if len(groups) == 0 {
|
||||
return Group{}, ErrNoGroupEntries
|
||||
}
|
||||
|
||||
// Assume the first entry is the "correct" one.
|
||||
return groups[0], nil
|
||||
}
|
||||
|
||||
func GetPasswdPath() (string, error) {
|
||||
return unixPasswdPath, nil
|
||||
}
|
||||
|
||||
func GetPasswd() (io.ReadCloser, error) {
|
||||
return os.Open(unixPasswdPath)
|
||||
}
|
||||
|
||||
func GetGroupPath() (string, error) {
|
||||
return unixGroupPath, nil
|
||||
}
|
||||
|
||||
func GetGroup() (io.ReadCloser, error) {
|
||||
return os.Open(unixGroupPath)
|
||||
}
|
||||
|
||||
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
|
||||
// user cannot be found (or there is no /etc/passwd file on the filesystem),
|
||||
// then CurrentUser returns an error.
|
||||
func CurrentUser() (User, error) {
|
||||
return LookupUid(unix.Getuid())
|
||||
}
|
||||
|
||||
// CurrentGroup looks up the current user's group by their primary group id's
|
||||
// entry in /etc/passwd. If the group cannot be found (or there is no
|
||||
// /etc/group file on the filesystem), then CurrentGroup returns an error.
|
||||
func CurrentGroup() (Group, error) {
|
||||
return LookupGid(unix.Getgid())
|
||||
}
|
||||
|
||||
func currentUserSubIDs(fileName string) ([]SubID, error) {
|
||||
u, err := CurrentUser()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filter := func(entry SubID) bool {
|
||||
return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
|
||||
}
|
||||
return ParseSubIDFileFilter(fileName, filter)
|
||||
}
|
||||
|
||||
func CurrentUserSubUIDs() ([]SubID, error) {
|
||||
return currentUserSubIDs("/etc/subuid")
|
||||
}
|
||||
|
||||
func CurrentUserSubGIDs() ([]SubID, error) {
|
||||
return currentUserSubIDs("/etc/subgid")
|
||||
}
|
||||
|
||||
func CurrentProcessUIDMap() ([]IDMap, error) {
|
||||
return ParseIDMapFile("/proc/self/uid_map")
|
||||
}
|
||||
|
||||
func CurrentProcessGIDMap() ([]IDMap, error) {
|
||||
return ParseIDMapFile("/proc/self/gid_map")
|
||||
}
|
||||
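For context, a minimal sketch of how the lookup helpers above are typically called, assuming the vendored import path github.com/opencontainers/runc/libcontainer/user, a Unix host with /etc/passwd present, and an illustrative group name:

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Resolve the current process's passwd entry; this returns an error if
	// /etc/passwd is missing or the uid has no entry.
	u, err := user.CurrentUser()
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)

	// Named lookups work the same way, via LookupUser / LookupGroup.
	if grp, err := user.LookupGroup("wheel"); err == nil {
		fmt.Println("wheel gid:", grp.Gid)
	}
}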
@@ -0,0 +1,605 @@
package user
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
minID = 0
|
||||
maxID = 1<<31 - 1 // for 32-bit systems compatibility
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNoPasswdEntries is returned if no matching entries were found in /etc/passwd.
|
||||
ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
|
||||
// ErrNoGroupEntries is returned if no matching entries were found in /etc/group.
|
||||
ErrNoGroupEntries = errors.New("no matching entries in group file")
|
||||
// ErrRange is returned if a UID or GID is outside of the valid range.
|
||||
ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minID, maxID)
|
||||
)
|
||||
|
||||
type User struct {
|
||||
Name string
|
||||
Pass string
|
||||
Uid int
|
||||
Gid int
|
||||
Gecos string
|
||||
Home string
|
||||
Shell string
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
Name string
|
||||
Pass string
|
||||
Gid int
|
||||
List []string
|
||||
}
|
||||
|
||||
// SubID represents an entry in /etc/sub{u,g}id
|
||||
type SubID struct {
|
||||
Name string
|
||||
SubID int64
|
||||
Count int64
|
||||
}
|
||||
|
||||
// IDMap represents an entry in /proc/PID/{u,g}id_map
|
||||
type IDMap struct {
|
||||
ID int64
|
||||
ParentID int64
|
||||
Count int64
|
||||
}
|
||||
|
||||
func parseLine(line []byte, v ...interface{}) {
|
||||
parseParts(bytes.Split(line, []byte(":")), v...)
|
||||
}
|
||||
|
||||
func parseParts(parts [][]byte, v ...interface{}) {
|
||||
if len(parts) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for i, p := range parts {
|
||||
// Ignore cases where we don't have enough fields to populate the arguments.
|
||||
// Some configuration files like to misbehave.
|
||||
if len(v) <= i {
|
||||
break
|
||||
}
|
||||
|
||||
// Use the type of the argument to figure out how to parse it, scanf() style.
|
||||
// This is legit.
|
||||
switch e := v[i].(type) {
|
||||
case *string:
|
||||
*e = string(p)
|
||||
case *int:
|
||||
// "numbers", with conversion errors ignored because of some misbehaving configuration files.
|
||||
*e, _ = strconv.Atoi(string(p))
|
||||
case *int64:
|
||||
*e, _ = strconv.ParseInt(string(p), 10, 64)
|
||||
case *[]string:
|
||||
// Comma-separated lists.
|
||||
if len(p) != 0 {
|
||||
*e = strings.Split(string(p), ",")
|
||||
} else {
|
||||
*e = []string{}
|
||||
}
|
||||
default:
|
||||
// Someone goof'd when writing code using this function. Scream so they can hear us.
|
||||
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ParsePasswdFile(path string) ([]User, error) {
|
||||
passwd, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer passwd.Close()
|
||||
return ParsePasswd(passwd)
|
||||
}
|
||||
|
||||
func ParsePasswd(passwd io.Reader) ([]User, error) {
|
||||
return ParsePasswdFilter(passwd, nil)
|
||||
}
|
||||
|
||||
func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
|
||||
passwd, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer passwd.Close()
|
||||
return ParsePasswdFilter(passwd, filter)
|
||||
}
|
||||
|
||||
func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
|
||||
if r == nil {
|
||||
return nil, errors.New("nil source for passwd-formatted data")
|
||||
}
|
||||
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []User{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
line := bytes.TrimSpace(s.Bytes())
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// see: man 5 passwd
|
||||
// name:password:UID:GID:GECOS:directory:shell
|
||||
// Name:Pass:Uid:Gid:Gecos:Home:Shell
|
||||
// root:x:0:0:root:/root:/bin/bash
|
||||
// adm:x:3:4:adm:/var/adm:/bin/false
|
||||
p := User{}
|
||||
parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)
|
||||
|
||||
if filter == nil || filter(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func ParseGroupFile(path string) ([]Group, error) {
|
||||
group, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer group.Close()
|
||||
return ParseGroup(group)
|
||||
}
|
||||
|
||||
func ParseGroup(group io.Reader) ([]Group, error) {
|
||||
return ParseGroupFilter(group, nil)
|
||||
}
|
||||
|
||||
func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
|
||||
group, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer group.Close()
|
||||
return ParseGroupFilter(group, filter)
|
||||
}
|
||||
|
||||
func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
|
||||
if r == nil {
|
||||
return nil, errors.New("nil source for group-formatted data")
|
||||
}
|
||||
rd := bufio.NewReader(r)
|
||||
out := []Group{}
|
||||
|
||||
// Read the file line-by-line.
|
||||
for {
|
||||
var (
|
||||
isPrefix bool
|
||||
wholeLine []byte
|
||||
err error
|
||||
)
|
||||
|
||||
// Read the next line. We do so in chunks (as much as reader's
|
||||
// buffer is able to keep), check if we read enough columns
|
||||
// already on each step and store final result in wholeLine.
|
||||
for {
|
||||
var line []byte
|
||||
line, isPrefix, err = rd.ReadLine()
|
||||
|
||||
if err != nil {
|
||||
// We should return no error if EOF is reached
|
||||
// without a match.
|
||||
if err == io.EOF { //nolint:errorlint // comparison with io.EOF is legit, https://github.com/polyfloyd/go-errorlint/pull/12
|
||||
err = nil
|
||||
}
|
||||
return out, err
|
||||
}
|
||||
|
||||
// Simple common case: line is short enough to fit in a
|
||||
// single reader's buffer.
|
||||
if !isPrefix && len(wholeLine) == 0 {
|
||||
wholeLine = line
|
||||
break
|
||||
}
|
||||
|
||||
wholeLine = append(wholeLine, line...)
|
||||
|
||||
// Check if we read the whole line already.
|
||||
if !isPrefix {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// There's no spec for /etc/passwd or /etc/group, but we try to follow
|
||||
// the same rules as the glibc parser, which allows comments and blank
|
||||
// space at the beginning of a line.
|
||||
wholeLine = bytes.TrimSpace(wholeLine)
|
||||
if len(wholeLine) == 0 || wholeLine[0] == '#' {
|
||||
continue
|
||||
}
|
||||
|
||||
// see: man 5 group
|
||||
// group_name:password:GID:user_list
|
||||
// Name:Pass:Gid:List
|
||||
// root:x:0:root
|
||||
// adm:x:4:root,adm,daemon
|
||||
p := Group{}
|
||||
parseLine(wholeLine, &p.Name, &p.Pass, &p.Gid, &p.List)
|
||||
|
||||
if filter == nil || filter(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ExecUser struct {
|
||||
Uid int
|
||||
Gid int
|
||||
Sgids []int
|
||||
Home string
|
||||
}
|
||||
|
||||
// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
|
||||
// given file paths and uses that data as the arguments to GetExecUser. If the
|
||||
// files cannot be opened for any reason, the error is ignored and a nil
|
||||
// io.Reader is passed instead.
|
||||
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
|
||||
var passwd, group io.Reader
|
||||
|
||||
if passwdFile, err := os.Open(passwdPath); err == nil {
|
||||
passwd = passwdFile
|
||||
defer passwdFile.Close()
|
||||
}
|
||||
|
||||
if groupFile, err := os.Open(groupPath); err == nil {
|
||||
group = groupFile
|
||||
defer groupFile.Close()
|
||||
}
|
||||
|
||||
return GetExecUser(userSpec, defaults, passwd, group)
|
||||
}
|
||||
|
||||
// GetExecUser parses a user specification string (using the passwd and group
|
||||
// readers as sources for /etc/passwd and /etc/group data, respectively). In
|
||||
// the case of blank fields or missing data from the sources, the values in
|
||||
// defaults are used.
|
||||
//
|
||||
// GetExecUser will return an error if a user or group literal could not be
|
||||
// found in any entry in passwd and group respectively.
|
||||
//
|
||||
// Examples of valid user specifications are:
|
||||
// - ""
|
||||
// - "user"
|
||||
// - "uid"
|
||||
// - "user:group"
|
||||
// - "uid:gid
|
||||
// - "user:gid"
|
||||
// - "uid:group"
|
||||
//
|
||||
// It should be noted that if you specify a numeric user or group id, they will
|
||||
// not be evaluated as usernames (only the metadata will be filled). So attempting
|
||||
// to parse a user with user.Name = "1337" will produce the user with a UID of
|
||||
// 1337.
|
||||
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
|
||||
if defaults == nil {
|
||||
defaults = new(ExecUser)
|
||||
}
|
||||
|
||||
// Copy over defaults.
|
||||
user := &ExecUser{
|
||||
Uid: defaults.Uid,
|
||||
Gid: defaults.Gid,
|
||||
Sgids: defaults.Sgids,
|
||||
Home: defaults.Home,
|
||||
}
|
||||
|
||||
// Sgids slice *cannot* be nil.
|
||||
if user.Sgids == nil {
|
||||
user.Sgids = []int{}
|
||||
}
|
||||
|
||||
// Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
|
||||
var userArg, groupArg string
|
||||
parseLine([]byte(userSpec), &userArg, &groupArg)
|
||||
|
||||
// Convert userArg and groupArg to be numeric, so we don't have to execute
|
||||
// Atoi *twice* for each iteration over lines.
|
||||
uidArg, uidErr := strconv.Atoi(userArg)
|
||||
gidArg, gidErr := strconv.Atoi(groupArg)
|
||||
|
||||
// Find the matching user.
|
||||
users, err := ParsePasswdFilter(passwd, func(u User) bool {
|
||||
if userArg == "" {
|
||||
// Default to current state of the user.
|
||||
return u.Uid == user.Uid
|
||||
}
|
||||
|
||||
if uidErr == nil {
|
||||
// If the userArg is numeric, always treat it as a UID.
|
||||
return uidArg == u.Uid
|
||||
}
|
||||
|
||||
return u.Name == userArg
|
||||
})
|
||||
|
||||
// If we can't find the user, we have to bail.
|
||||
if err != nil && passwd != nil {
|
||||
if userArg == "" {
|
||||
userArg = strconv.Itoa(user.Uid)
|
||||
}
|
||||
return nil, fmt.Errorf("unable to find user %s: %w", userArg, err)
|
||||
}
|
||||
|
||||
var matchedUserName string
|
||||
if len(users) > 0 {
|
||||
// First match wins, even if there's more than one matching entry.
|
||||
matchedUserName = users[0].Name
|
||||
user.Uid = users[0].Uid
|
||||
user.Gid = users[0].Gid
|
||||
user.Home = users[0].Home
|
||||
} else if userArg != "" {
|
||||
// If we can't find a user with the given username, the only other valid
|
||||
// option is if it's a numeric username with no associated entry in passwd.
|
||||
|
||||
if uidErr != nil {
|
||||
// Not numeric.
|
||||
return nil, fmt.Errorf("unable to find user %s: %w", userArg, ErrNoPasswdEntries)
|
||||
}
|
||||
user.Uid = uidArg
|
||||
|
||||
// Must be inside valid uid range.
|
||||
if user.Uid < minID || user.Uid > maxID {
|
||||
return nil, ErrRange
|
||||
}
|
||||
|
||||
// Okay, so it's numeric. We can just roll with this.
|
||||
}
|
||||
|
||||
// On to the groups. If we matched a username, we need to do this because of
|
||||
// the supplementary group IDs.
|
||||
if groupArg != "" || matchedUserName != "" {
|
||||
groups, err := ParseGroupFilter(group, func(g Group) bool {
|
||||
// If the group argument isn't explicit, we'll just search for it.
|
||||
if groupArg == "" {
|
||||
// Check if user is a member of this group.
|
||||
for _, u := range g.List {
|
||||
if u == matchedUserName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if gidErr == nil {
|
||||
// If the groupArg is numeric, always treat it as a GID.
|
||||
return gidArg == g.Gid
|
||||
}
|
||||
|
||||
return g.Name == groupArg
|
||||
})
|
||||
if err != nil && group != nil {
|
||||
return nil, fmt.Errorf("unable to find groups for spec %v: %w", matchedUserName, err)
|
||||
}
|
||||
|
||||
// Only start modifying user.Gid if it is in explicit form.
|
||||
if groupArg != "" {
|
||||
if len(groups) > 0 {
|
||||
// First match wins, even if there's more than one matching entry.
|
||||
user.Gid = groups[0].Gid
|
||||
} else {
|
||||
// If we can't find a group with the given name, the only other valid
|
||||
// option is if it's a numeric group name with no associated entry in group.
|
||||
|
||||
if gidErr != nil {
|
||||
// Not numeric.
|
||||
return nil, fmt.Errorf("unable to find group %s: %w", groupArg, ErrNoGroupEntries)
|
||||
}
|
||||
user.Gid = gidArg
|
||||
|
||||
// Must be inside valid gid range.
|
||||
if user.Gid < minID || user.Gid > maxID {
|
||||
return nil, ErrRange
|
||||
}
|
||||
|
||||
// Okay, so it's numeric. We can just roll with this.
|
||||
}
|
||||
} else if len(groups) > 0 {
|
||||
// Supplementary group ids only make sense if in the implicit form.
|
||||
user.Sgids = make([]int, len(groups))
|
||||
for i, group := range groups {
|
||||
user.Sgids[i] = group.Gid
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
|
||||
// GetAdditionalGroups looks up a list of groups by name or group id
|
||||
// against the given /etc/group formatted data. If a group name cannot
|
||||
// be found, an error will be returned. If a group id cannot be found,
|
||||
// or the given group data is nil, the id will be returned as-is
|
||||
// provided it is in the legal range.
|
||||
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
|
||||
groups := []Group{}
|
||||
if group != nil {
|
||||
var err error
|
||||
groups, err = ParseGroupFilter(group, func(g Group) bool {
|
||||
for _, ag := range additionalGroups {
|
||||
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to find additional groups %v: %w", additionalGroups, err)
|
||||
}
|
||||
}
|
||||
|
||||
gidMap := make(map[int]struct{})
|
||||
for _, ag := range additionalGroups {
|
||||
var found bool
|
||||
for _, g := range groups {
|
||||
// if we found a matched group either by name or gid, take the
|
||||
// first matched as correct
|
||||
if g.Name == ag || strconv.Itoa(g.Gid) == ag {
|
||||
if _, ok := gidMap[g.Gid]; !ok {
|
||||
gidMap[g.Gid] = struct{}{}
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
// we asked for a group but didn't find it. let's check to see
|
||||
// if we wanted a numeric group
|
||||
if !found {
|
||||
gid, err := strconv.ParseInt(ag, 10, 64)
|
||||
if err != nil {
|
||||
// Not a numeric ID either.
|
||||
return nil, fmt.Errorf("Unable to find group %s: %w", ag, ErrNoGroupEntries)
|
||||
}
|
||||
// Ensure gid is inside gid range.
|
||||
if gid < minID || gid > maxID {
|
||||
return nil, ErrRange
|
||||
}
|
||||
gidMap[int(gid)] = struct{}{}
|
||||
}
|
||||
}
|
||||
gids := []int{}
|
||||
for gid := range gidMap {
|
||||
gids = append(gids, gid)
|
||||
}
|
||||
return gids, nil
|
||||
}
|
||||
|
||||
// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
|
||||
// that opens the groupPath given and gives it as an argument to
|
||||
// GetAdditionalGroups.
|
||||
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
|
||||
var group io.Reader
|
||||
|
||||
if groupFile, err := os.Open(groupPath); err == nil {
|
||||
group = groupFile
|
||||
defer groupFile.Close()
|
||||
}
|
||||
return GetAdditionalGroups(additionalGroups, group)
|
||||
}
|
||||
|
||||
func ParseSubIDFile(path string) ([]SubID, error) {
|
||||
subid, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer subid.Close()
|
||||
return ParseSubID(subid)
|
||||
}
|
||||
|
||||
func ParseSubID(subid io.Reader) ([]SubID, error) {
|
||||
return ParseSubIDFilter(subid, nil)
|
||||
}
|
||||
|
||||
func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
|
||||
subid, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer subid.Close()
|
||||
return ParseSubIDFilter(subid, filter)
|
||||
}
|
||||
|
||||
func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
|
||||
if r == nil {
|
||||
return nil, errors.New("nil source for subid-formatted data")
|
||||
}
|
||||
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []SubID{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
line := bytes.TrimSpace(s.Bytes())
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// see: man 5 subuid
|
||||
p := SubID{}
|
||||
parseLine(line, &p.Name, &p.SubID, &p.Count)
|
||||
|
||||
if filter == nil || filter(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func ParseIDMapFile(path string) ([]IDMap, error) {
|
||||
r, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
return ParseIDMap(r)
|
||||
}
|
||||
|
||||
func ParseIDMap(r io.Reader) ([]IDMap, error) {
|
||||
return ParseIDMapFilter(r, nil)
|
||||
}
|
||||
|
||||
func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
|
||||
r, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
return ParseIDMapFilter(r, filter)
|
||||
}
|
||||
|
||||
func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
|
||||
if r == nil {
|
||||
return nil, errors.New("nil source for idmap-formatted data")
|
||||
}
|
||||
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []IDMap{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
line := bytes.TrimSpace(s.Bytes())
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// see: man 7 user_namespaces
|
||||
p := IDMap{}
|
||||
parseParts(bytes.Fields(line), &p.ID, &p.ParentID, &p.Count)
|
||||
|
||||
if filter == nil || filter(p) {
|
||||
out = append(out, p)
|
||||
}
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
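GetExecUser above is the piece the callers build on: it resolves a "user[:group]" spec against passwd- and group-formatted data. A minimal sketch of that behaviour with in-memory data (the entries and spec below are made up for illustration, and the import path assumes the vendored package):

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	passwd := strings.NewReader("app:x:1000:1000:app:/home/app:/bin/sh\n")
	group := strings.NewReader("app:x:1000:\nwheel:x:10:app\n")

	// Defaults fill in any field the spec and the data leave blank.
	defaults := &user.ExecUser{Uid: 0, Gid: 0, Home: "/"}

	execUser, err := user.GetExecUser("app:wheel", defaults, passwd, group)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	// For the data above this prints uid=1000 gid=10 home=/home/app:
	// the uid and home come from the passwd entry, the gid from the
	// explicitly named "wheel" group.
	fmt.Printf("uid=%d gid=%d home=%s\n", execUser.Uid, execUser.Gid, execUser.Home)
}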
43 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go generated vendored Normal file
@@ -0,0 +1,43 @@
//go:build gofuzz
// +build gofuzz

package user

import (
	"io"
	"strings"
)

func IsDivisbleBy(n int, divisibleby int) bool {
	return (n % divisibleby) == 0
}

func FuzzUser(data []byte) int {
	if len(data) == 0 {
		return -1
	}
	if !IsDivisbleBy(len(data), 5) {
		return -1
	}

	var divided [][]byte

	chunkSize := len(data) / 5

	for i := 0; i < len(data); i += chunkSize {
		end := i + chunkSize

		divided = append(divided, data[i:end])
	}

	_, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil)

	var passwd, group io.Reader

	group = strings.NewReader(string(divided[1]))
	_, _ = GetAdditionalGroups([]string{string(divided[2])}, group)

	passwd = strings.NewReader(string(divided[3]))
	_, _ = GetExecUser(string(divided[4]), nil, passwd, group)
	return 1
}
@@ -0,0 +1,404 @@
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
)
|
||||
|
||||
// HTTPConv are the HTTP semantic convention attributes defined for a version
|
||||
// of the OpenTelemetry specification.
|
||||
type HTTPConv struct {
|
||||
NetConv *NetConv
|
||||
|
||||
EnduserIDKey attribute.Key
|
||||
HTTPClientIPKey attribute.Key
|
||||
HTTPFlavorKey attribute.Key
|
||||
HTTPMethodKey attribute.Key
|
||||
HTTPRequestContentLengthKey attribute.Key
|
||||
HTTPResponseContentLengthKey attribute.Key
|
||||
HTTPRouteKey attribute.Key
|
||||
HTTPSchemeHTTP attribute.KeyValue
|
||||
HTTPSchemeHTTPS attribute.KeyValue
|
||||
HTTPStatusCodeKey attribute.Key
|
||||
HTTPTargetKey attribute.Key
|
||||
HTTPURLKey attribute.Key
|
||||
HTTPUserAgentKey attribute.Key
|
||||
}
|
||||
|
||||
// ClientResponse returns attributes for an HTTP response received by a client
|
||||
// from a server. The following attributes are returned if the related values
|
||||
// are defined in resp: "http.status.code", "http.response_content_length".
|
||||
//
|
||||
// This does not add all OpenTelemetry required attributes for an HTTP event,
|
||||
// it assumes ClientRequest was used to create the span with a complete set of
|
||||
// attributes. A complete set of attributes can be generated by combining these
// with the attributes from the request contained in resp. For example:
|
||||
//
|
||||
// append(ClientResponse(resp), ClientRequest(resp.Request)...)
|
||||
func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
|
||||
var n int
|
||||
if resp.StatusCode > 0 {
|
||||
n++
|
||||
}
|
||||
if resp.ContentLength > 0 {
|
||||
n++
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, n)
|
||||
if resp.StatusCode > 0 {
|
||||
attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
|
||||
}
|
||||
if resp.ContentLength > 0 {
|
||||
attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// ClientRequest returns attributes for an HTTP request made by a client. The
|
||||
// following attributes are always returned: "http.url", "http.flavor",
|
||||
// "http.method", "net.peer.name". The following attributes are returned if the
|
||||
// related values are defined in req: "net.peer.port", "http.user_agent",
|
||||
// "http.request_content_length", "enduser.id".
|
||||
func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue {
|
||||
n := 3 // URL, peer name, proto, and method.
|
||||
var h string
|
||||
if req.URL != nil {
|
||||
h = req.URL.Host
|
||||
}
|
||||
peer, p := firstHostPort(h, req.Header.Get("Host"))
|
||||
port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
|
||||
if port > 0 {
|
||||
n++
|
||||
}
|
||||
useragent := req.UserAgent()
|
||||
if useragent != "" {
|
||||
n++
|
||||
}
|
||||
if req.ContentLength > 0 {
|
||||
n++
|
||||
}
|
||||
userID, _, hasUserID := req.BasicAuth()
|
||||
if hasUserID {
|
||||
n++
|
||||
}
|
||||
attrs := make([]attribute.KeyValue, 0, n)
|
||||
|
||||
attrs = append(attrs, c.method(req.Method))
|
||||
attrs = append(attrs, c.proto(req.Proto))
|
||||
|
||||
var u string
|
||||
if req.URL != nil {
|
||||
// Remove any username/password info that may be in the URL.
|
||||
userinfo := req.URL.User
|
||||
req.URL.User = nil
|
||||
u = req.URL.String()
|
||||
// Restore any username/password info that was removed.
|
||||
req.URL.User = userinfo
|
||||
}
|
||||
attrs = append(attrs, c.HTTPURLKey.String(u))
|
||||
|
||||
attrs = append(attrs, c.NetConv.PeerName(peer))
|
||||
if port > 0 {
|
||||
attrs = append(attrs, c.NetConv.PeerPort(port))
|
||||
}
|
||||
|
||||
if useragent != "" {
|
||||
attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
|
||||
}
|
||||
|
||||
if l := req.ContentLength; l > 0 {
|
||||
attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
|
||||
}
|
||||
|
||||
if hasUserID {
|
||||
attrs = append(attrs, c.EnduserIDKey.String(userID))
|
||||
}
|
||||
|
||||
return attrs
|
||||
}
|
||||
|
||||
// ServerRequest returns attributes for an HTTP request received by a server.
|
||||
//
|
||||
// The server must be the primary server name if it is known. For example this
|
||||
// would be the ServerName directive
|
||||
// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
|
||||
// server, and the server_name directive
|
||||
// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
|
||||
// nginx server. More generically, the primary server name would be the host
|
||||
// header value that matches the default virtual host of an HTTP server. It
|
||||
// should include the host identifier and if a port is used to route to the
|
||||
// server that port identifier should be included as an appropriate port
|
||||
// suffix.
|
||||
//
|
||||
// If the primary server name is not known, server should be an empty string.
|
||||
// The req Host will be used to determine the server instead.
|
||||
//
|
||||
// The following attributes are always returned: "http.method", "http.scheme",
|
||||
// "http.flavor", "http.target", "net.host.name". The following attributes are
|
||||
// returned if the related values are defined in req: "net.host.port",
|
||||
// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
|
||||
// "http.client_ip".
|
||||
func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
|
||||
// TODO: This currently does not add the specification required
|
||||
// `http.target` attribute. It has too high of a cardinality to safely be
|
||||
// added. An alternate should be added, or this comment removed, when it is
|
||||
// addressed by the specification. If it is ultimately decided to continue
|
||||
// not including the attribute, the HTTPTargetKey field of the HTTPConv
|
||||
// should be removed as well.
|
||||
|
||||
n := 4 // Method, scheme, proto, and host name.
|
||||
var host string
|
||||
var p int
|
||||
if server == "" {
|
||||
host, p = splitHostPort(req.Host)
|
||||
} else {
|
||||
// Prioritize the primary server name.
|
||||
host, p = splitHostPort(server)
|
||||
if p < 0 {
|
||||
_, p = splitHostPort(req.Host)
|
||||
}
|
||||
}
|
||||
hostPort := requiredHTTPPort(req.TLS != nil, p)
|
||||
if hostPort > 0 {
|
||||
n++
|
||||
}
|
||||
peer, peerPort := splitHostPort(req.RemoteAddr)
|
||||
if peer != "" {
|
||||
n++
|
||||
if peerPort > 0 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
useragent := req.UserAgent()
|
||||
if useragent != "" {
|
||||
n++
|
||||
}
|
||||
userID, _, hasUserID := req.BasicAuth()
|
||||
if hasUserID {
|
||||
n++
|
||||
}
|
||||
clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
|
||||
if clientIP != "" {
|
||||
n++
|
||||
}
|
||||
attrs := make([]attribute.KeyValue, 0, n)
|
||||
|
||||
attrs = append(attrs, c.method(req.Method))
|
||||
attrs = append(attrs, c.scheme(req.TLS != nil))
|
||||
attrs = append(attrs, c.proto(req.Proto))
|
||||
attrs = append(attrs, c.NetConv.HostName(host))
|
||||
|
||||
if hostPort > 0 {
|
||||
attrs = append(attrs, c.NetConv.HostPort(hostPort))
|
||||
}
|
||||
|
||||
if peer != "" {
|
||||
// The Go HTTP server sets RemoteAddr to "IP:port", this will not be a
|
||||
// file-path that would be interpreted with a sock family.
|
||||
attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
|
||||
if peerPort > 0 {
|
||||
attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
|
||||
}
|
||||
}
|
||||
|
||||
if useragent != "" {
|
||||
attrs = append(attrs, c.HTTPUserAgentKey.String(useragent))
|
||||
}
|
||||
|
||||
if hasUserID {
|
||||
attrs = append(attrs, c.EnduserIDKey.String(userID))
|
||||
}
|
||||
|
||||
if clientIP != "" {
|
||||
attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
|
||||
}
|
||||
|
||||
return attrs
|
||||
}
|
||||
|
||||
func (c *HTTPConv) method(method string) attribute.KeyValue {
|
||||
if method == "" {
|
||||
return c.HTTPMethodKey.String(http.MethodGet)
|
||||
}
|
||||
return c.HTTPMethodKey.String(method)
|
||||
}
|
||||
|
||||
func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive
|
||||
if https {
|
||||
return c.HTTPSchemeHTTPS
|
||||
}
|
||||
return c.HTTPSchemeHTTP
|
||||
}
|
||||
|
||||
func (c *HTTPConv) proto(proto string) attribute.KeyValue {
|
||||
switch proto {
|
||||
case "HTTP/1.0":
|
||||
return c.HTTPFlavorKey.String("1.0")
|
||||
case "HTTP/1.1":
|
||||
return c.HTTPFlavorKey.String("1.1")
|
||||
case "HTTP/2":
|
||||
return c.HTTPFlavorKey.String("2.0")
|
||||
case "HTTP/3":
|
||||
return c.HTTPFlavorKey.String("3.0")
|
||||
default:
|
||||
return c.HTTPFlavorKey.String(proto)
|
||||
}
|
||||
}
|
||||
|
||||
func serverClientIP(xForwardedFor string) string {
|
||||
if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
|
||||
xForwardedFor = xForwardedFor[:idx]
|
||||
}
|
||||
return xForwardedFor
|
||||
}
|
||||
|
||||
func requiredHTTPPort(https bool, port int) int { // nolint:revive
|
||||
if https {
|
||||
if port > 0 && port != 443 {
|
||||
return port
|
||||
}
|
||||
} else {
|
||||
if port > 0 && port != 80 {
|
||||
return port
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Return the request host and port from the first non-empty source.
|
||||
func firstHostPort(source ...string) (host string, port int) {
|
||||
for _, hostport := range source {
|
||||
host, port = splitHostPort(hostport)
|
||||
if host != "" || port > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// RequestHeader returns the contents of h as OpenTelemetry attributes.
|
||||
func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue {
|
||||
return c.header("http.request.header", h)
|
||||
}
|
||||
|
||||
// ResponseHeader returns the contents of h as OpenTelemetry attributes.
|
||||
func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue {
|
||||
return c.header("http.response.header", h)
|
||||
}
|
||||
|
||||
func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue {
|
||||
key := func(k string) attribute.Key {
|
||||
k = strings.ToLower(k)
|
||||
k = strings.ReplaceAll(k, "-", "_")
|
||||
k = fmt.Sprintf("%s.%s", prefix, k)
|
||||
return attribute.Key(k)
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, len(h))
|
||||
for k, v := range h {
|
||||
attrs = append(attrs, key(k).StringSlice(v))
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// ClientStatus returns a span status code and message for an HTTP status code
|
||||
// value received by a client.
|
||||
func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) {
|
||||
stat, valid := validateHTTPStatusCode(code)
|
||||
if !valid {
|
||||
return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
|
||||
}
|
||||
return stat, ""
|
||||
}
|
||||
|
||||
// ServerStatus returns a span status code and message for an HTTP status code
|
||||
// value returned by a server. Status codes in the 400-499 range are not
|
||||
// returned as errors.
|
||||
func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) {
|
||||
stat, valid := validateHTTPStatusCode(code)
|
||||
if !valid {
|
||||
return stat, fmt.Sprintf("Invalid HTTP status code %d", code)
|
||||
}
|
||||
|
||||
if code/100 == 4 {
|
||||
return codes.Unset, ""
|
||||
}
|
||||
return stat, ""
|
||||
}
|
||||
|
||||
type codeRange struct {
|
||||
fromInclusive int
|
||||
toInclusive int
|
||||
}
|
||||
|
||||
func (r codeRange) contains(code int) bool {
|
||||
return r.fromInclusive <= code && code <= r.toInclusive
|
||||
}
|
||||
|
||||
var validRangesPerCategory = map[int][]codeRange{
|
||||
1: {
|
||||
{http.StatusContinue, http.StatusEarlyHints},
|
||||
},
|
||||
2: {
|
||||
{http.StatusOK, http.StatusAlreadyReported},
|
||||
{http.StatusIMUsed, http.StatusIMUsed},
|
||||
},
|
||||
3: {
|
||||
{http.StatusMultipleChoices, http.StatusUseProxy},
|
||||
{http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
|
||||
},
|
||||
4: {
|
||||
{http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
|
||||
{http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
|
||||
{http.StatusPreconditionRequired, http.StatusTooManyRequests},
|
||||
{http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
|
||||
{http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
|
||||
},
|
||||
5: {
|
||||
{http.StatusInternalServerError, http.StatusLoopDetected},
|
||||
{http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
|
||||
},
|
||||
}
|
||||
|
||||
// validateHTTPStatusCode validates the HTTP status code and returns
|
||||
// corresponding span status code. If the `code` is not a valid HTTP status
|
||||
// code, returns span status Error and false.
|
||||
func validateHTTPStatusCode(code int) (codes.Code, bool) {
|
||||
category := code / 100
|
||||
ranges, ok := validRangesPerCategory[category]
|
||||
if !ok {
|
||||
return codes.Error, false
|
||||
}
|
||||
ok = false
|
||||
for _, crange := range ranges {
|
||||
ok = crange.contains(code)
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
return codes.Error, false
|
||||
}
|
||||
if category > 0 && category < 4 {
|
||||
return codes.Unset, true
|
||||
}
|
||||
return codes.Error, true
|
||||
}
|
||||
|
|
@@ -0,0 +1,324 @@
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal // import "go.opentelemetry.io/otel/semconv/internal/v2"
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// NetConv are the network semantic convention attributes defined for a version
|
||||
// of the OpenTelemetry specification.
|
||||
type NetConv struct {
|
||||
NetHostNameKey attribute.Key
|
||||
NetHostPortKey attribute.Key
|
||||
NetPeerNameKey attribute.Key
|
||||
NetPeerPortKey attribute.Key
|
||||
NetSockFamilyKey attribute.Key
|
||||
NetSockPeerAddrKey attribute.Key
|
||||
NetSockPeerPortKey attribute.Key
|
||||
NetSockHostAddrKey attribute.Key
|
||||
NetSockHostPortKey attribute.Key
|
||||
NetTransportOther attribute.KeyValue
|
||||
NetTransportTCP attribute.KeyValue
|
||||
NetTransportUDP attribute.KeyValue
|
||||
NetTransportInProc attribute.KeyValue
|
||||
}
|
||||
|
||||
func (c *NetConv) Transport(network string) attribute.KeyValue {
|
||||
switch network {
|
||||
case "tcp", "tcp4", "tcp6":
|
||||
return c.NetTransportTCP
|
||||
case "udp", "udp4", "udp6":
|
||||
return c.NetTransportUDP
|
||||
case "unix", "unixgram", "unixpacket":
|
||||
return c.NetTransportInProc
|
||||
default:
|
||||
// "ip:*", "ip4:*", and "ip6:*" all are considered other.
|
||||
return c.NetTransportOther
|
||||
}
|
||||
}
|
||||
|
||||
// Host returns attributes for a network host address.
|
||||
func (c *NetConv) Host(address string) []attribute.KeyValue {
|
||||
h, p := splitHostPort(address)
|
||||
var n int
|
||||
if h != "" {
|
||||
n++
|
||||
if p > 0 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, n)
|
||||
attrs = append(attrs, c.HostName(h))
|
||||
if p > 0 {
|
||||
attrs = append(attrs, c.HostPort(int(p)))
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
// Server returns attributes for a network listener listening at address. See
|
||||
// net.Listen for information about acceptable address values, address should
|
||||
// be the same as the one used to create ln. If ln is nil, only network host
|
||||
// attributes will be returned that describe address. Otherwise, the socket
|
||||
// level information about ln will also be included.
|
||||
func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue {
|
||||
if ln == nil {
|
||||
return c.Host(address)
|
||||
}
|
||||
|
||||
lAddr := ln.Addr()
|
||||
if lAddr == nil {
|
||||
return c.Host(address)
|
||||
}
|
||||
|
||||
hostName, hostPort := splitHostPort(address)
|
||||
sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
|
||||
network := lAddr.Network()
|
||||
sockFamily := family(network, sockHostAddr)
|
||||
|
||||
n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
|
||||
n += positiveInt(hostPort, sockHostPort)
|
||||
attr := make([]attribute.KeyValue, 0, n)
|
||||
if hostName != "" {
|
||||
attr = append(attr, c.HostName(hostName))
|
||||
if hostPort > 0 {
|
||||
// Only if net.host.name is set should net.host.port be.
|
||||
attr = append(attr, c.HostPort(hostPort))
|
||||
}
|
||||
}
|
||||
if network != "" {
|
||||
attr = append(attr, c.Transport(network))
|
||||
}
|
||||
if sockFamily != "" {
|
||||
attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
|
||||
}
|
||||
if sockHostAddr != "" {
|
||||
attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
|
||||
if sockHostPort > 0 {
|
||||
// Only if net.sock.host.addr is set should net.sock.host.port be.
|
||||
attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
|
||||
}
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func (c *NetConv) HostName(name string) attribute.KeyValue {
|
||||
return c.NetHostNameKey.String(name)
|
||||
}
|
||||
|
||||
func (c *NetConv) HostPort(port int) attribute.KeyValue {
|
||||
return c.NetHostPortKey.Int(port)
|
||||
}
|
||||
|
||||
// Client returns attributes for a client network connection to address. See
|
||||
// net.Dial for information about acceptable address values, address should be
|
||||
// the same as the one used to create conn. If conn is nil, only network peer
|
||||
// attributes will be returned that describe address. Otherwise, the socket
|
||||
// level information about conn will also be included.
|
||||
func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue {
|
||||
if conn == nil {
|
||||
return c.Peer(address)
|
||||
}
|
||||
|
||||
lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
|
||||
|
||||
var network string
|
||||
switch {
|
||||
case lAddr != nil:
|
||||
network = lAddr.Network()
|
||||
case rAddr != nil:
|
||||
network = rAddr.Network()
|
||||
default:
|
||||
return c.Peer(address)
|
||||
}
|
||||
|
||||
peerName, peerPort := splitHostPort(address)
|
||||
var (
|
||||
sockFamily string
|
||||
sockPeerAddr string
|
||||
sockPeerPort int
|
||||
sockHostAddr string
|
||||
sockHostPort int
|
||||
)
|
||||
|
||||
if lAddr != nil {
|
||||
sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
|
||||
}
|
||||
|
||||
if rAddr != nil {
|
||||
sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
|
||||
}
|
||||
|
||||
switch {
|
||||
case sockHostAddr != "":
|
||||
sockFamily = family(network, sockHostAddr)
|
||||
case sockPeerAddr != "":
|
||||
sockFamily = family(network, sockPeerAddr)
|
||||
}
|
||||
|
||||
n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
|
||||
n += positiveInt(peerPort, sockPeerPort, sockHostPort)
|
||||
attr := make([]attribute.KeyValue, 0, n)
|
||||
if peerName != "" {
|
||||
attr = append(attr, c.PeerName(peerName))
|
||||
if peerPort > 0 {
|
||||
// Only if net.peer.name is set should net.peer.port be.
|
||||
attr = append(attr, c.PeerPort(peerPort))
|
||||
}
|
||||
}
|
||||
if network != "" {
|
||||
attr = append(attr, c.Transport(network))
|
||||
}
|
||||
if sockFamily != "" {
|
||||
attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
|
||||
}
|
||||
if sockPeerAddr != "" {
|
||||
attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
|
||||
if sockPeerPort > 0 {
|
||||
// Only if net.sock.peer.addr is set should net.sock.peer.port be.
|
||||
attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
|
||||
}
|
||||
}
|
||||
if sockHostAddr != "" {
|
||||
attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
|
||||
if sockHostPort > 0 {
|
||||
// Only if net.sock.host.addr is set should net.sock.host.port be.
|
||||
attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
|
||||
}
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func family(network, address string) string {
|
||||
switch network {
|
||||
case "unix", "unixgram", "unixpacket":
|
||||
return "unix"
|
||||
default:
|
||||
if ip := net.ParseIP(address); ip != nil {
|
||||
if ip.To4() == nil {
|
||||
return "inet6"
|
||||
}
|
||||
return "inet"
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func nonZeroStr(strs ...string) int {
|
||||
var n int
|
||||
for _, str := range strs {
|
||||
if str != "" {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func positiveInt(ints ...int) int {
|
||||
var n int
|
||||
for _, i := range ints {
|
||||
if i > 0 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Peer returns attributes for a network peer address.
|
||||
func (c *NetConv) Peer(address string) []attribute.KeyValue {
|
||||
h, p := splitHostPort(address)
|
||||
var n int
|
||||
if h != "" {
|
||||
n++
|
||||
if p > 0 {
|
||||
n++
|
||||
}
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, n)
|
||||
attrs = append(attrs, c.PeerName(h))
|
||||
if p > 0 {
|
||||
attrs = append(attrs, c.PeerPort(int(p)))
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
func (c *NetConv) PeerName(name string) attribute.KeyValue {
|
||||
return c.NetPeerNameKey.String(name)
|
||||
}
|
||||
|
||||
func (c *NetConv) PeerPort(port int) attribute.KeyValue {
|
||||
return c.NetPeerPortKey.Int(port)
|
||||
}
|
||||
|
||||
func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue {
|
||||
return c.NetSockPeerAddrKey.String(addr)
|
||||
}
|
||||
|
||||
func (c *NetConv) SockPeerPort(port int) attribute.KeyValue {
|
||||
return c.NetSockPeerPortKey.Int(port)
|
||||
}
|
||||
|
||||
// splitHostPort splits a network address hostport of the form "host",
|
||||
// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port",
|
||||
// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
|
||||
// port.
|
||||
//
|
||||
// An empty host is returned if it is not provided or unparsable. A negative
|
||||
// port is returned if it is not provided or unparsable.
|
||||
func splitHostPort(hostport string) (host string, port int) {
|
||||
port = -1
|
||||
|
||||
if strings.HasPrefix(hostport, "[") {
|
||||
addrEnd := strings.LastIndex(hostport, "]")
|
||||
if addrEnd < 0 {
|
||||
// Invalid hostport.
|
||||
return
|
||||
}
|
||||
if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
|
||||
host = hostport[1:addrEnd]
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if i := strings.LastIndex(hostport, ":"); i < 0 {
|
||||
host = hostport
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
host, pStr, err := net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
p, err := strconv.ParseUint(pStr, 10, 16)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return host, int(p)
|
||||
}
|
||||
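The httpconv package added in the next hunk wraps these internal converters behind package-level helpers (ClientRequest, ServerRequest, ServerStatus, and friends). A minimal server-side sketch of how those helpers are used, assuming a tracer provider is configured elsewhere (with none configured the span below is a no-op, so the example is side-effect free):

package main

import (
	"net/http"

	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Attach the semantic-convention attributes for the incoming request to
	// the active span. The first argument is the primary server name; the
	// value here is a placeholder.
	span := trace.SpanFromContext(r.Context())
	span.SetAttributes(httpconv.ServerRequest("example-service.internal", r)...)

	w.WriteHeader(http.StatusOK)

	// ServerStatus maps the HTTP status onto a span status; 4xx is left
	// Unset on the server side, per its doc comment.
	span.SetStatus(httpconv.ServerStatus(http.StatusOK))
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil)
}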
@@ -0,0 +1,152 @@
// Copyright The OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package httpconv provides OpenTelemetry HTTP semantic conventions for
|
||||
// tracing telemetry.
|
||||
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/semconv/internal/v2"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||
)
|
||||
|
||||
var (
|
||||
nc = &internal.NetConv{
|
||||
NetHostNameKey: semconv.NetHostNameKey,
|
||||
NetHostPortKey: semconv.NetHostPortKey,
|
||||
NetPeerNameKey: semconv.NetPeerNameKey,
|
||||
NetPeerPortKey: semconv.NetPeerPortKey,
|
||||
NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
|
||||
NetSockPeerPortKey: semconv.NetSockPeerPortKey,
|
||||
NetTransportOther: semconv.NetTransportOther,
|
||||
NetTransportTCP: semconv.NetTransportTCP,
|
||||
NetTransportUDP: semconv.NetTransportUDP,
|
||||
NetTransportInProc: semconv.NetTransportInProc,
|
||||
}
|
||||
|
||||
hc = &internal.HTTPConv{
|
||||
NetConv: nc,
|
||||
|
||||
EnduserIDKey: semconv.EnduserIDKey,
|
||||
HTTPClientIPKey: semconv.HTTPClientIPKey,
|
||||
HTTPFlavorKey: semconv.HTTPFlavorKey,
|
||||
HTTPMethodKey: semconv.HTTPMethodKey,
|
||||
HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey,
|
||||
HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
|
||||
HTTPRouteKey: semconv.HTTPRouteKey,
|
||||
HTTPSchemeHTTP: semconv.HTTPSchemeHTTP,
|
||||
HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS,
|
||||
HTTPStatusCodeKey: semconv.HTTPStatusCodeKey,
|
||||
HTTPTargetKey: semconv.HTTPTargetKey,
|
||||
HTTPURLKey: semconv.HTTPURLKey,
|
||||
HTTPUserAgentKey: semconv.HTTPUserAgentKey,
|
||||
}
|
||||
)
|
||||
|
||||
// ClientResponse returns trace attributes for an HTTP response received by a
|
||||
// client from a server. It will return the following attributes if the related
|
||||
// values are defined in resp: "http.status.code",
|
||||
// "http.response_content_length".
|
||||
//
|
||||
// This does not add all OpenTelemetry required attributes for an HTTP event,
|
||||
// it assumes ClientRequest was used to create the span with a complete set of
|
||||
// attributes. If a complete set of attributes can be generated using the
|
||||
// request contained in resp. For example:
|
||||
//
|
||||
// append(ClientResponse(resp), ClientRequest(resp.Request)...)
|
||||
func ClientResponse(resp *http.Response) []attribute.KeyValue {
|
||||
return hc.ClientResponse(resp)
|
||||
}
|
||||
|
||||
// ClientRequest returns trace attributes for an HTTP request made by a client.
|
||||
// The following attributes are always returned: "http.url", "http.flavor",
|
||||
// "http.method", "net.peer.name". The following attributes are returned if the
|
||||
// related values are defined in req: "net.peer.port", "http.user_agent",
|
||||
// "http.request_content_length", "enduser.id".
|
||||
func ClientRequest(req *http.Request) []attribute.KeyValue {
|
||||
return hc.ClientRequest(req)
|
||||
}
|
||||
|
||||
// ClientStatus returns a span status code and message for an HTTP status code
|
||||
// value received by a client.
|
||||
func ClientStatus(code int) (codes.Code, string) {
|
||||
return hc.ClientStatus(code)
|
||||
}
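
A sketch of how these client-side helpers fit together, not part of this diff: the package name, tracer name, and wiring below are illustrative assumptions, and the OpenTelemetry SDK is presumed to be configured elsewhere.

package clientinstr

import (
    "context"
    "net/http"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
    "go.opentelemetry.io/otel/trace"
)

// doRequest wraps an outgoing request in a client span, annotating it with
// request, response, and status attributes from httpconv.
func doRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
    tracer := otel.Tracer("example/client")
    ctx, span := tracer.Start(ctx, "HTTP "+req.Method,
        trace.WithSpanKind(trace.SpanKindClient),
        trace.WithAttributes(httpconv.ClientRequest(req)...))
    defer span.End()

    resp, err := client.Do(req.WithContext(ctx))
    if err != nil {
        span.RecordError(err)
        return nil, err
    }
    span.SetAttributes(httpconv.ClientResponse(resp)...)
    span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
    return resp, nil
}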

// ServerRequest returns trace attributes for an HTTP request received by a
// server.
//
// The server must be the primary server name if it is known. For example, this
// would be the ServerName directive
// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
// server, and the server_name directive
// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
// nginx server. More generically, the primary server name would be the host
// header value that matches the default virtual host of an HTTP server. It
// should include the host identifier and, if a port is used to route to the
// server, that port identifier should be included as an appropriate port
// suffix.
//
// If the primary server name is not known, server should be an empty string.
// The req Host will be used to determine the server instead.
//
// The following attributes are always returned: "http.method", "http.scheme",
// "http.flavor", "http.target", "net.host.name". The following attributes are
// returned if the related values are defined in req: "net.host.port",
// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
// "http.client_ip".
func ServerRequest(server string, req *http.Request) []attribute.KeyValue {
    return hc.ServerRequest(server, req)
}

// ServerStatus returns a span status code and message for an HTTP status code
// value returned by a server. Status codes in the 400-499 range are not
// returned as errors.
func ServerStatus(code int) (codes.Code, string) {
    return hc.ServerStatus(code)
}
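
A corresponding server-side sketch, not part of this diff: a minimal middleware that starts a server span per request. The package name, tracer name, and the "example.com" primary server name are assumptions; the final status is hard-coded to 200 purely for illustration, since the ResponseWriter is not wrapped here.

package serverinstr

import (
    "net/http"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
    "go.opentelemetry.io/otel/trace"
)

// middleware starts a server span for each request using the httpconv helpers.
func middleware(next http.Handler) http.Handler {
    tracer := otel.Tracer("example/server")
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ctx, span := tracer.Start(r.Context(), r.Method,
            trace.WithSpanKind(trace.SpanKindServer),
            trace.WithAttributes(httpconv.ServerRequest("example.com", r)...))
        defer span.End()

        next.ServeHTTP(w, r.WithContext(ctx))
        // The handler's real status code is not observable without wrapping w;
        // 200 is assumed here for illustration only.
        span.SetStatus(httpconv.ServerStatus(http.StatusOK))
    })
}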

// RequestHeader returns the contents of h as attributes.
//
// Instrumentation should require an explicit configuration of which headers to
// capture and then prune what they pass here. Including all headers can be a
// security risk - explicit configuration helps avoid leaking sensitive
// information.
//
// The User-Agent header is already captured in the http.user_agent attribute
// from ClientRequest and ServerRequest. Instrumentation may provide an option
// to capture that header here even though it is not recommended. Otherwise,
// instrumentation should filter that out of what is passed.
func RequestHeader(h http.Header) []attribute.KeyValue {
    return hc.RequestHeader(h)
}

// ResponseHeader returns the contents of h as attributes.
//
// Instrumentation should require an explicit configuration of which headers to
// capture and then prune what they pass here. Including all headers can be a
// security risk - explicit configuration helps avoid leaking sensitive
// information.
//
// The User-Agent header is already captured in the http.user_agent attribute
// from ClientRequest and ServerRequest. Instrumentation may provide an option
// to capture that header here even though it is not recommended. Otherwise,
// instrumentation should filter that out of what is passed.
func ResponseHeader(h http.Header) []attribute.KeyValue {
    return hc.ResponseHeader(h)
}
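
A sketch of the explicit allow-list approach the comments above call for, not part of this diff: allowedRequestHeaders and requestHeaderAttrs are hypothetical names, and the allow list itself is an assumption.

package headerinstr

import (
    "net/http"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
)

// allowedRequestHeaders is an explicit allow list; capturing every header is a
// security risk, so only these are forwarded to RequestHeader.
var allowedRequestHeaders = []string{"Content-Type", "Accept"}

// requestHeaderAttrs prunes r's headers to the allow list before converting
// them to attributes.
func requestHeaderAttrs(r *http.Request) []attribute.KeyValue {
    filtered := http.Header{}
    for _, name := range allowedRequestHeaders {
        if vals := r.Header.Values(name); len(vals) > 0 {
            filtered[http.CanonicalHeaderKey(name)] = vals
        }
    }
    return httpconv.RequestHeader(filtered)
}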
File diff suppressed because it is too large

@@ -1,20 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
// patterns for OpenTelemetry things. This package represents the v1.21.0
// version of the OpenTelemetry semantic conventions.
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
@@ -1,199 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated from semantic convention specification. DO NOT EDIT.

package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"

import "go.opentelemetry.io/otel/attribute"

// This semantic convention defines the attributes used to represent a feature
// flag evaluation as an event.
const (
    // FeatureFlagKeyKey is the attribute Key conforming to the
    // "feature_flag.key" semantic conventions. It represents the unique
    // identifier of the feature flag.
    //
    // Type: string
    // RequirementLevel: Required
    // Stability: stable
    // Examples: 'logo-color'
    FeatureFlagKeyKey = attribute.Key("feature_flag.key")

    // FeatureFlagProviderNameKey is the attribute Key conforming to the
    // "feature_flag.provider_name" semantic conventions. It represents the
    // name of the service provider that performs the flag evaluation.
    //
    // Type: string
    // RequirementLevel: Recommended
    // Stability: stable
    // Examples: 'Flag Manager'
    FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")

    // FeatureFlagVariantKey is the attribute Key conforming to the
    // "feature_flag.variant" semantic conventions. It SHOULD be a semantic
    // identifier for a value. If one is unavailable, a stringified version of
    // the value can be used.
    //
    // Type: string
    // RequirementLevel: Recommended
    // Stability: stable
    // Examples: 'red', 'true', 'on'
    // Note: A semantic identifier, commonly referred to as a variant, provides
    // a means for referring to a value without including the value itself.
    // This can provide additional context for understanding the meaning behind
    // a value. For example, the variant `red` may be used for the value
    // `#c05543`.
    //
    // A stringified version of the value can be used in situations where a
    // semantic identifier is unavailable. String representation of the value
    // should be determined by the implementer.
    FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
)

// FeatureFlagKey returns an attribute KeyValue conforming to the
// "feature_flag.key" semantic conventions. It represents the unique identifier
// of the feature flag.
func FeatureFlagKey(val string) attribute.KeyValue {
    return FeatureFlagKeyKey.String(val)
}

// FeatureFlagProviderName returns an attribute KeyValue conforming to the
// "feature_flag.provider_name" semantic conventions. It represents the name of
// the service provider that performs the flag evaluation.
func FeatureFlagProviderName(val string) attribute.KeyValue {
    return FeatureFlagProviderNameKey.String(val)
}

// FeatureFlagVariant returns an attribute KeyValue conforming to the
// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
// identifier for the evaluated value. If one is unavailable, a stringified
// version of the value can be used.
func FeatureFlagVariant(val string) attribute.KeyValue {
    return FeatureFlagVariantKey.String(val)
}
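
A sketch of emitting a feature-flag evaluation event with these helpers, not part of this diff (and note this vendored v1.21.0 file is being removed here): the import path refers to the upstream module, and the package, function, and flag values below are illustrative assumptions.

package featureflaginstr

import (
    "context"

    semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    "go.opentelemetry.io/otel/trace"
)

// recordEvaluation attaches a feature-flag evaluation event to the span in ctx.
func recordEvaluation(ctx context.Context) {
    span := trace.SpanFromContext(ctx)
    span.AddEvent("feature_flag",
        trace.WithAttributes(
            semconv.FeatureFlagKey("logo-color"),
            semconv.FeatureFlagProviderName("Flag Manager"),
            semconv.FeatureFlagVariant("red"),
        ))
}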

// RPC received/sent message.
const (
    // MessageTypeKey is the attribute Key conforming to the "message.type"
    // semantic conventions. It represents whether this is a received or
    // sent message.
    //
    // Type: Enum
    // RequirementLevel: Optional
    // Stability: stable
    MessageTypeKey = attribute.Key("message.type")

    // MessageIDKey is the attribute Key conforming to the "message.id"
    // semantic conventions. It MUST be calculated as two different counters
    // starting from `1`, one for sent messages and one for received messages.
    //
    // Type: int
    // RequirementLevel: Optional
    // Stability: stable
    // Note: This way we guarantee that the values will be consistent between
    // different implementations.
    MessageIDKey = attribute.Key("message.id")

    // MessageCompressedSizeKey is the attribute Key conforming to the
    // "message.compressed_size" semantic conventions. It represents the
    // compressed size of the message in bytes.
    //
    // Type: int
    // RequirementLevel: Optional
    // Stability: stable
    MessageCompressedSizeKey = attribute.Key("message.compressed_size")

    // MessageUncompressedSizeKey is the attribute Key conforming to the
    // "message.uncompressed_size" semantic conventions. It represents the
    // uncompressed size of the message in bytes.
    //
    // Type: int
    // RequirementLevel: Optional
    // Stability: stable
    MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
)

var (
    // sent
    MessageTypeSent = MessageTypeKey.String("SENT")
    // received
    MessageTypeReceived = MessageTypeKey.String("RECEIVED")
)

// MessageID returns an attribute KeyValue conforming to the "message.id"
// semantic conventions. It MUST be calculated as two different counters
// starting from `1`, one for sent messages and one for received messages.
func MessageID(val int) attribute.KeyValue {
    return MessageIDKey.Int(val)
}

// MessageCompressedSize returns an attribute KeyValue conforming to the
// "message.compressed_size" semantic conventions. It represents the compressed
// size of the message in bytes.
func MessageCompressedSize(val int) attribute.KeyValue {
    return MessageCompressedSizeKey.Int(val)
}

// MessageUncompressedSize returns an attribute KeyValue conforming to the
// "message.uncompressed_size" semantic conventions. It represents the
// uncompressed size of the message in bytes.
func MessageUncompressedSize(val int) attribute.KeyValue {
    return MessageUncompressedSizeKey.Int(val)
}
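
A sketch of recording a per-message RPC event with these attributes, not part of this diff: the package and function names are illustrative, sizes are in bytes, and the caller is assumed to track the message counter itself.

package rpcinstr

import (
    "context"

    semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    "go.opentelemetry.io/otel/trace"
)

// recordSentMessage annotates the span in ctx with a "message" event for the
// id-th sent RPC message.
func recordSentMessage(ctx context.Context, id, compressed, uncompressed int) {
    span := trace.SpanFromContext(ctx)
    span.AddEvent("message",
        trace.WithAttributes(
            semconv.MessageTypeSent,
            semconv.MessageID(id),
            semconv.MessageCompressedSize(compressed),
            semconv.MessageUncompressedSize(uncompressed),
        ))
}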

// The attributes used to report a single exception associated with a span.
const (
    // ExceptionEscapedKey is the attribute Key conforming to the
    // "exception.escaped" semantic conventions. It SHOULD be set to true if
    // the exception event is recorded at a point where it is known that the
    // exception is escaping the scope of the span.
    //
    // Type: boolean
    // RequirementLevel: Optional
    // Stability: stable
    // Note: An exception is considered to have escaped (or left) the scope of
    // a span if that span is ended while the exception is still logically "in
    // flight". This may be actually "in flight" in some languages (e.g. if the
    // exception is passed to a Context manager's `__exit__` method in Python)
    // but will usually be caught at the point of recording the exception in
    // most languages.
    //
    // It is usually not possible to determine at the point where an exception
    // is thrown whether it will escape the scope of a span. However, it is
    // trivial to know that an exception will escape, if one checks for an
    // active exception just before ending the span, as done in the
    // [example above](#recording-an-exception).
    //
    // It follows that an exception may still escape the scope of the span
    // even if the `exception.escaped` attribute was not set or set to false,
    // since the event might have been recorded at a time where it was not
    // clear whether the exception will escape.
    ExceptionEscapedKey = attribute.Key("exception.escaped")
)

// ExceptionEscaped returns an attribute KeyValue conforming to the
// "exception.escaped" semantic conventions. It SHOULD be set to true if the
// exception event is recorded at a point where it is known that the exception
// is escaping the scope of the span.
func ExceptionEscaped(val bool) attribute.KeyValue {
    return ExceptionEscapedKey.Bool(val)
}
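
A sketch of using this attribute when recording an error that is known to escape the span's scope, not part of this diff; the package and function names are illustrative.

package excinstr

import (
    "context"

    semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
    "go.opentelemetry.io/otel/trace"
)

// recordEscapingError records err on the span in ctx with exception.escaped
// set to true, then ends the span; the caller propagates err afterwards.
func recordEscapingError(ctx context.Context, err error) {
    span := trace.SpanFromContext(ctx)
    span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
    span.End()
}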

@@ -1,20 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"

const (
    // ExceptionEventName is the name of the Span event representing an exception.
    ExceptionEventName = "exception"
)

File diff suppressed because it is too large
@@ -1,20 +0,0 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"

// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"

File diff suppressed because it is too large
@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
}

func (fr *Framer) maxHeaderStringLen() int {
    v := fr.maxHeaderListSize()
    if uint32(int(v)) == v {
        return int(v)
    v := int(fr.maxHeaderListSize())
    if v < 0 {
        // If maxHeaderListSize overflows an int, use no limit (0).
        return 0
    }
    // They had a crazy big number for MaxHeaderBytes anyway,
    // so give them unlimited header lengths:
    return 0
    return v
}

// readMetaFrame returns 0 or more CONTINUATION frames from fr and
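
A sketch of why the rewritten check tests v < 0, not part of this diff: on a 32-bit platform a SETTINGS_MAX_HEADER_LIST_SIZE near MaxUint32 wraps negative when converted to int; int32 is used below only to reproduce that wrap on any platform.

package main

import (
    "fmt"
    "math"
)

func main() {
    huge := uint32(math.MaxUint32 - 15) // an oversized SETTINGS_MAX_HEADER_LIST_SIZE
    asInt := int32(huge)                // what int(huge) would yield on a 32-bit build
    fmt.Println(asInt, asInt < 0)       // negative, so the framer falls back to "no limit" (returns 0)
}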

@@ -124,8 +124,9 @@ github.com/Microsoft/go-winio/internal/socket
github.com/Microsoft/go-winio/internal/stringbuffer
github.com/Microsoft/go-winio/pkg/bindfilter
github.com/Microsoft/go-winio/pkg/guid
github.com/Microsoft/go-winio/tools/mkwinsyscall
github.com/Microsoft/go-winio/vhd
# github.com/Microsoft/hcsshim v0.11.4
# github.com/Microsoft/hcsshim v0.11.0
## explicit; go 1.18
github.com/Microsoft/hcsshim
github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options
@@ -369,7 +370,7 @@ github.com/containerd/cgroups/v3/cgroup1
github.com/containerd/cgroups/v3/cgroup1/stats
github.com/containerd/cgroups/v3/cgroup2
github.com/containerd/cgroups/v3/cgroup2/stats
# github.com/containerd/containerd v1.7.12
# github.com/containerd/containerd v1.7.6
## explicit; go 1.19
github.com/containerd/containerd
github.com/containerd/containerd/api/runtime/sandbox/v1
@@ -417,7 +418,6 @@ github.com/containerd/containerd/namespaces
github.com/containerd/containerd/oci
github.com/containerd/containerd/pkg/cap
github.com/containerd/containerd/pkg/cleanup
github.com/containerd/containerd/pkg/deprecation
github.com/containerd/containerd/pkg/dialer
github.com/containerd/containerd/pkg/epoch
github.com/containerd/containerd/pkg/kmutex
@@ -449,7 +449,6 @@ github.com/containerd/containerd/sandbox
github.com/containerd/containerd/sandbox/proxy
github.com/containerd/containerd/services
github.com/containerd/containerd/services/introspection
github.com/containerd/containerd/services/warning
github.com/containerd/containerd/snapshots
github.com/containerd/containerd/snapshots/proxy
github.com/containerd/containerd/tracing
@@ -919,6 +918,9 @@ github.com/opencontainers/go-digest/digestset
github.com/opencontainers/image-spec/identity
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.1.7
## explicit; go 1.17
github.com/opencontainers/runc/libcontainer/user
# github.com/opencontainers/runtime-spec v1.1.0
## explicit
github.com/opencontainers/runtime-spec/specs-go
@@ -1065,9 +1067,10 @@ go.opentelemetry.io/otel/internal/attribute
go.opentelemetry.io/otel/internal/baggage
go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/internal/v2
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
go.opentelemetry.io/otel/semconv/v1.20.0
go.opentelemetry.io/otel/semconv/v1.21.0
# go.opentelemetry.io/otel/metric v1.22.0
## explicit; go 1.20
go.opentelemetry.io/otel/metric
@@ -1081,7 +1084,7 @@ go.opentelemetry.io/otel/trace/embedded
# go.uber.org/multierr v1.11.0
## explicit; go 1.19
go.uber.org/multierr
# golang.org/x/crypto v0.18.0
# golang.org/x/crypto v0.19.0
## explicit; go 1.18
golang.org/x/crypto/argon2
golang.org/x/crypto/blake2b
@@ -1113,7 +1116,7 @@ golang.org/x/exp/slog/internal/buffer
# golang.org/x/mod v0.14.0
## explicit; go 1.18
golang.org/x/mod/semver
# golang.org/x/net v0.20.0
# golang.org/x/net v0.21.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/http/httpguts