diff --git a/go.mod b/go.mod index be111aad0..35865acb1 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220228164355-396b2034c795 github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21 github.com/containerd/cgroups v1.0.3 // indirect - github.com/docker/docker v20.10.12+incompatible + github.com/docker/docker v20.10.13+incompatible github.com/genuinetools/bpfd v0.0.2-0.20190525234658-c12d8cd9aac8 github.com/go-git/go-billy/v5 v5.3.1 github.com/go-git/go-git/v5 v5.4.2 diff --git a/go.sum b/go.sum index 97dff9220..6fe69842d 100644 --- a/go.sum +++ b/go.sum @@ -580,8 +580,9 @@ github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw= +github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index bada4a8e3..b6bca4cef 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5755,7 +5755,6 @@ paths: property1: "string" property2: "string" IpcMode: "" - LxcConf: [] Memory: 0 MemorySwap: 0 MemoryReservation: 0 @@ -8607,12 +8606,20 @@ paths: if `tty` was specified as part of creating and starting the exec instance. operationId: "ExecResize" responses: - 201: + 200: description: "No error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "No such exec instance" schema: $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" parameters: - name: "id" in: "path" diff --git a/vendor/github.com/docker/docker/daemon/logger/local/read.go b/vendor/github.com/docker/docker/daemon/logger/local/read.go index b28aa9065..d517995cf 100644 --- a/vendor/github.com/docker/docker/daemon/logger/local/read.go +++ b/vendor/github.com/docker/docker/daemon/logger/local/read.go @@ -1,12 +1,12 @@ package local import ( + "bytes" "context" "encoding/binary" + "fmt" "io" - "bytes" - "github.com/docker/docker/api/types/plugins/logdriver" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/logger/loggerutils" @@ -14,6 +14,10 @@ import ( "github.com/pkg/errors" ) +// maxMsgLen is the maximum size of the logger.Message after serialization. +// logger.defaultBufSize caps the size of Line field. +const maxMsgLen int = 1e6 // 1MB. 
+ func (d *driver) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { logWatcher := logger.NewLogWatcher() @@ -99,7 +103,35 @@ func getTailReader(ctx context.Context, r loggerutils.SizeReaderAt, req int) (io type decoder struct { rdr io.Reader proto *logdriver.LogEntry - buf []byte + // buf keeps bytes from rdr. + buf []byte + // offset is the position in buf. + // If offset > 0, buf[offset:] has bytes which are read but haven't used. + offset int + // nextMsgLen is the length of the next log message. + // If nextMsgLen = 0, a new value must be read from rdr. + nextMsgLen int +} + +func (d *decoder) readRecord(size int) error { + var err error + for i := 0; i < maxDecodeRetry; i++ { + var n int + n, err = io.ReadFull(d.rdr, d.buf[d.offset:size]) + d.offset += n + if err != nil { + if err != io.ErrUnexpectedEOF { + return err + } + continue + } + break + } + if err != nil { + return err + } + d.offset = 0 + return nil } func (d *decoder) Decode() (*logger.Message, error) { @@ -111,44 +143,35 @@ func (d *decoder) Decode() (*logger.Message, error) { if d.buf == nil { d.buf = make([]byte, initialBufSize) } - var ( - read int - err error - ) - for i := 0; i < maxDecodeRetry; i++ { - var n int - n, err = io.ReadFull(d.rdr, d.buf[read:encodeBinaryLen]) + if d.nextMsgLen == 0 { + msgLen, err := d.decodeSizeHeader() if err != nil { - if err != io.ErrUnexpectedEOF { - return nil, errors.Wrap(err, "error reading log message length") - } - read += n - continue + return nil, err } - read += n - break - } - if err != nil { - return nil, errors.Wrapf(err, "could not read log message length: read: %d, expected: %d", read, encodeBinaryLen) - } - msgLen := int(binary.BigEndian.Uint32(d.buf[:read])) + if msgLen > maxMsgLen { + return nil, fmt.Errorf("log message is too large (%d > %d)", msgLen, maxMsgLen) + } - if len(d.buf) < msgLen+encodeBinaryLen { - d.buf = make([]byte, msgLen+encodeBinaryLen) - } else { - if msgLen <= initialBufSize { + if len(d.buf) < msgLen+encodeBinaryLen { + d.buf = make([]byte, msgLen+encodeBinaryLen) + } else if msgLen <= initialBufSize { d.buf = d.buf[:initialBufSize] } else { d.buf = d.buf[:msgLen+encodeBinaryLen] } - } - return decodeLogEntry(d.rdr, d.proto, d.buf, msgLen) + d.nextMsgLen = msgLen + } + return d.decodeLogEntry() } func (d *decoder) Reset(rdr io.Reader) { + if d.rdr == rdr { + return + } + d.rdr = rdr if d.proto != nil { resetProto(d.proto) @@ -156,6 +179,8 @@ func (d *decoder) Reset(rdr io.Reader) { if d.buf != nil { d.buf = d.buf[:initialBufSize] } + d.offset = 0 + d.nextMsgLen = 0 } func (d *decoder) Close() { @@ -171,34 +196,32 @@ func decodeFunc(rdr io.Reader) loggerutils.Decoder { return &decoder{rdr: rdr} } -func decodeLogEntry(rdr io.Reader, proto *logdriver.LogEntry, buf []byte, msgLen int) (*logger.Message, error) { - var ( - read int - err error - ) - for i := 0; i < maxDecodeRetry; i++ { - var n int - n, err = io.ReadFull(rdr, buf[read:msgLen+encodeBinaryLen]) - if err != nil { - if err != io.ErrUnexpectedEOF { - return nil, errors.Wrap(err, "could not decode log entry") - } - read += n - continue - } - break - } +func (d *decoder) decodeSizeHeader() (int, error) { + err := d.readRecord(encodeBinaryLen) if err != nil { - return nil, errors.Wrapf(err, "could not decode entry: read %d, expected: %d", read, msgLen) + return 0, errors.Wrap(err, "could not read a size header") } - if err := proto.Unmarshal(buf[:msgLen]); err != nil { - return nil, errors.Wrap(err, "error unmarshalling log entry") + msgLen := 
int(binary.BigEndian.Uint32(d.buf[:encodeBinaryLen])) + return msgLen, nil +} + +func (d *decoder) decodeLogEntry() (*logger.Message, error) { + msgLen := d.nextMsgLen + err := d.readRecord(msgLen + encodeBinaryLen) + if err != nil { + return nil, errors.Wrapf(err, "could not read a log entry (size=%d+%d)", msgLen, encodeBinaryLen) + } + d.nextMsgLen = 0 + + if err := d.proto.Unmarshal(d.buf[:msgLen]); err != nil { + return nil, errors.Wrapf(err, "error unmarshalling log entry (size=%d)", msgLen) } - msg := protoToMessage(proto) + msg := protoToMessage(d.proto) if msg.PLogMetaData == nil { msg.Line = append(msg.Line, '\n') } + return msg, nil } diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go new file mode 100644 index 000000000..755a483d7 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/follow.go @@ -0,0 +1,211 @@ +package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" + +import ( + "io" + "os" + "time" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/pkg/filenotify" + "github.com/fsnotify/fsnotify" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var errRetry = errors.New("retry") +var errDone = errors.New("done") + +type follow struct { + file *os.File + dec Decoder + fileWatcher filenotify.FileWatcher + logWatcher *logger.LogWatcher + notifyRotate, notifyEvict chan interface{} + oldSize int64 + retries int +} + +func (fl *follow) handleRotate() error { + name := fl.file.Name() + + fl.file.Close() + fl.fileWatcher.Remove(name) + + // retry when the file doesn't exist + var err error + for retries := 0; retries <= 5; retries++ { + f, err := open(name) + if err == nil || !os.IsNotExist(err) { + fl.file = f + break + } + } + if err != nil { + return err + } + if err := fl.fileWatcher.Add(name); err != nil { + return err + } + fl.dec.Reset(fl.file) + return nil +} + +func (fl *follow) handleMustClose(evictErr error) { + fl.file.Close() + fl.dec.Close() + fl.logWatcher.Err <- errors.Wrap(evictErr, "log reader evicted due to errors") + logrus.WithField("file", fl.file.Name()).Error("Log reader notified that it must re-open log file, some log data may not be streamed to the client.") +} + +func (fl *follow) waitRead() error { + select { + case e := <-fl.notifyEvict: + if e != nil { + err := e.(error) + fl.handleMustClose(err) + } + return errDone + case e := <-fl.fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + fl.dec.Reset(fl.file) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-fl.notifyRotate: + case <-fl.logWatcher.WatchProducerGone(): + return errDone + case <-fl.logWatcher.WatchConsumerGone(): + return errDone + } + if err := fl.handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fl.fileWatcher.Errors(): + logrus.Debugf("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if fl.retries <= 5 { + fl.fileWatcher.Close() + fl.fileWatcher, err = watchFile(fl.file.Name()) + if err != nil { + return err + } + fl.retries++ + return errRetry + } + return err + case <-fl.logWatcher.WatchProducerGone(): + return errDone + case <-fl.logWatcher.WatchConsumerGone(): + return errDone + } +} + +func (fl *follow) handleDecodeErr(err error) error { + if !errors.Is(err, io.EOF) { + return err + } + + // Handle special case (#39235): max-file=1 and file was 
truncated + st, stErr := fl.file.Stat() + if stErr == nil { + size := st.Size() + defer func() { fl.oldSize = size }() + if size < fl.oldSize { // truncated + fl.file.Seek(0, 0) + fl.dec.Reset(fl.file) + return nil + } + } else { + logrus.WithError(stErr).Warn("logger: stat error") + } + + for { + err := fl.waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil +} + +func (fl *follow) mainLoop(since, until time.Time) { + for { + select { + case err := <-fl.notifyEvict: + if err != nil { + fl.handleMustClose(err.(error)) + } + return + default: + } + msg, err := fl.dec.Decode() + if err != nil { + if err := fl.handleDecodeErr(err); err != nil { + if err == errDone { + return + } + // we got an unrecoverable error, so return + fl.logWatcher.Err <- err + return + } + // ready to try again + continue + } + + fl.retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + // send the message, unless the consumer is gone + select { + case e := <-fl.notifyEvict: + if e != nil { + err := e.(error) + logrus.WithError(err).Debug("Reader evicted while sending log message") + fl.logWatcher.Err <- err + } + return + case fl.logWatcher.Msg <- msg: + case <-fl.logWatcher.WatchConsumerGone(): + return + } + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate, notifyEvict chan interface{}, dec Decoder, since, until time.Time) { + dec.Reset(f) + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + dec.Close() + fileWatcher.Close() + }() + + fl := &follow{ + file: f, + oldSize: -1, + logWatcher: logWatcher, + fileWatcher: fileWatcher, + notifyRotate: notifyRotate, + notifyEvict: notifyEvict, + dec: dec, + } + fl.mainLoop(since, until) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go index 6b42d9dd3..d4f3f5e14 100644 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go @@ -17,7 +17,6 @@ import ( "github.com/docker/docker/pkg/filenotify" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/pubsub" - "github.com/fsnotify/fsnotify" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -608,179 +607,6 @@ func tailFiles(files []SizeReaderAt, watcher *logger.LogWatcher, dec Decoder, ge } } -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate, notifyEvict chan interface{}, dec Decoder, since, until time.Time) { - dec.Reset(f) - - name := f.Name() - fileWatcher, err := watchFile(name) - if err != nil { - logWatcher.Err <- err - return - } - defer func() { - f.Close() - dec.Close() - fileWatcher.Close() - }() - - var retries int - handleRotate := func() error { - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = open(name) - if err == nil || !os.IsNotExist(err) { - break - } - } - if err != nil { - return err - } - if err := fileWatcher.Add(name); err != nil { - return err - } - dec.Reset(f) - return nil - } - - errRetry := errors.New("retry") - errDone := errors.New("done") - - handleMustClose := func(evictErr error) { - f.Close() - dec.Close() - logWatcher.Err <- errors.Wrap(err, "log reader evicted 
due to errors") - logrus.WithField("file", f.Name()).Error("Log reader notified that it must re-open log file, some log data may not be streamed to the client.") - } - - waitRead := func() error { - select { - case e := <-notifyEvict: - if e != nil { - err := e.(error) - handleMustClose(err) - } - return errDone - case e := <-fileWatcher.Events(): - switch e.Op { - case fsnotify.Write: - dec.Reset(f) - return nil - case fsnotify.Rename, fsnotify.Remove: - select { - case <-notifyRotate: - case <-logWatcher.WatchProducerGone(): - return errDone - case <-logWatcher.WatchConsumerGone(): - return errDone - } - if err := handleRotate(); err != nil { - return err - } - return nil - } - return errRetry - case err := <-fileWatcher.Errors(): - logrus.Debugf("logger got error watching file: %v", err) - // Something happened, let's try and stay alive and create a new watcher - if retries <= 5 { - fileWatcher.Close() - fileWatcher, err = watchFile(name) - if err != nil { - return err - } - retries++ - return errRetry - } - return err - case <-logWatcher.WatchProducerGone(): - return errDone - case <-logWatcher.WatchConsumerGone(): - return errDone - } - } - - oldSize := int64(-1) - handleDecodeErr := func(err error) error { - if !errors.Is(err, io.EOF) { - return err - } - - // Handle special case (#39235): max-file=1 and file was truncated - st, stErr := f.Stat() - if stErr == nil { - size := st.Size() - defer func() { oldSize = size }() - if size < oldSize { // truncated - f.Seek(0, 0) - return nil - } - } else { - logrus.WithError(stErr).Warn("logger: stat error") - } - - for { - err := waitRead() - if err == nil { - break - } - if err == errRetry { - continue - } - return err - } - return nil - } - - // main loop - for { - select { - case err := <-notifyEvict: - if err != nil { - handleMustClose(err.(error)) - } - return - default: - } - msg, err := dec.Decode() - if err != nil { - if err := handleDecodeErr(err); err != nil { - if err == errDone { - return - } - // we got an unrecoverable error, so return - logWatcher.Err <- err - return - } - // ready to try again - continue - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - if !until.IsZero() && msg.Timestamp.After(until) { - return - } - // send the message, unless the consumer is gone - select { - case e := <-notifyEvict: - if e != nil { - err := e.(error) - logrus.WithError(err).Debug("Reader evicted while sending log message") - logWatcher.Err <- err - } - return - case logWatcher.Msg <- msg: - case <-logWatcher.WatchConsumerGone(): - return - } - } -} - func watchFile(name string) (filenotify.FileWatcher, error) { var fileWatcher filenotify.FileWatcher diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go index e383aca84..a42eafcef 100644 --- a/vendor/github.com/docker/docker/dockerversion/version_lib.go +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -10,7 +10,6 @@ var ( Version = "library-import" BuildTime = "library-import" IAmStatic = "library-import" - InitCommitID = "library-import" PlatformName = "" ProductName = "" DefaultProductLicense = "" diff --git a/vendor/modules.txt b/vendor/modules.txt index dedf2bd15..de0a13302 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -326,7 +326,7 @@ github.com/docker/distribution/digestset github.com/docker/distribution/reference github.com/docker/distribution/registry/api/errcode 
github.com/docker/distribution/registry/client/auth/challenge -# github.com/docker/docker v20.10.12+incompatible +# github.com/docker/docker v20.10.13+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types
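
For reference, the substance of this bump is the rework of the local log driver's decoder in daemon/logger/local/read.go: records are framed with a big-endian size header followed by the protobuf-encoded entry, and v20.10.13 adds a maxMsgLen cap (1MB) plus a readRecord/offset scheme so a corrupted header can no longer drive an unbounded allocation. Below is a minimal standalone sketch of that length-prefixed framing, assuming a 4-byte header (the vendored encodeBinaryLen); the names are illustrative, not the vendored identifiers, and the sketch omits the trailing size field that the vendored format appends after each entry (which is why decodeLogEntry reads msgLen + encodeBinaryLen bytes).

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const (
	headerLen  = 4       // assumed size of the big-endian length prefix
	maxPayload = 1000000 // cap mirroring maxMsgLen; reject anything larger
)

// readFrame reads one length-prefixed record from r and returns its payload.
func readFrame(r io.Reader) ([]byte, error) {
	hdr := make([]byte, headerLen)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, fmt.Errorf("reading size header: %w", err)
	}
	n := int(binary.BigEndian.Uint32(hdr))
	if n > maxPayload {
		// Reject oversized frames before allocating a buffer for them.
		return nil, fmt.Errorf("frame too large (%d > %d)", n, maxPayload)
	}
	payload := make([]byte, n)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, fmt.Errorf("reading %d-byte payload: %w", n, err)
	}
	return payload, nil
}

func main() {
	// Build one frame: 4-byte big-endian length, then the payload.
	var buf bytes.Buffer
	msg := []byte("hello from the log file")
	hdr := make([]byte, headerLen)
	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
	buf.Write(hdr)
	buf.Write(msg)

	payload, err := readFrame(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d bytes: %s\n", len(payload), payload)
}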