chore(deps): bump github.com/go-git/go-git/v5 from 5.10.0 to 5.10.1 (#2890)

Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.10.0 to 5.10.1.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.10.0...v5.10.1)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
dependabot[bot] 2023-12-04 11:02:55 -08:00 committed by GitHub
parent 111205f010
commit 1903ab4df2
30 changed files with 697 additions and 549 deletions

go.mod

@@ -19,7 +19,7 @@ require (
     github.com/containerd/cgroups v1.1.0 // indirect
     github.com/docker/docker v23.0.5+incompatible
     github.com/go-git/go-billy/v5 v5.5.0
-    github.com/go-git/go-git/v5 v5.10.0
+    github.com/go-git/go-git/v5 v5.10.1
     github.com/golang/mock v1.6.0
     github.com/google/go-cmp v0.6.0
     github.com/google/go-containerregistry v0.15.2
@@ -64,7 +64,6 @@ require (
     github.com/Azure/go-autorest/tracing v0.6.0 // indirect
     github.com/Microsoft/go-winio v0.6.1 // indirect
     github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
-    github.com/acomagu/bufpipe v1.0.4 // indirect
     github.com/agext/levenshtein v1.2.3 // indirect
     github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect
     github.com/aws/aws-sdk-go-v2/credentials v1.16.8 // indirect
@@ -176,7 +175,7 @@ require (
     github.com/moby/swarmkit/v2 v2.0.0-20230315203717-e28e8ba9bc83 // indirect
     github.com/pelletier/go-toml v1.9.5 // indirect
     github.com/pjbgf/sha1cd v0.3.0 // indirect
-    github.com/skeema/knownhosts v1.2.0 // indirect
+    github.com/skeema/knownhosts v1.2.1 // indirect
     github.com/spf13/cast v1.3.1 // indirect
     github.com/spf13/jwalterweatherman v1.1.0 // indirect
     github.com/spf13/viper v1.8.1 // indirect

go.sum

@@ -91,8 +91,6 @@ github.com/Microsoft/hcsshim v0.11.4 h1:68vKo2VN8DE9AdN4tnkWnmdhqdbpUFM8OF3Airm7
 github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w=
 github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
 github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
-github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
-github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -305,8 +303,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmS
 github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
 github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
-github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ=
-github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo=
+github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk=
+github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -497,8 +495,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/mattn/go-ieproxy v0.0.2 h1:5wxtrdEeCqukidp2kWqwOb9tf4aqqEdIJCBsZ0Jc4/c=
@@ -632,8 +628,8 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM=
-github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo=
+github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
+github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=


@ -1,60 +0,0 @@
Go (the standard library)
https://golang.org/
----------------------------------------------------------------
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
github.com/matryer/is
https://github.com/matryer/is
----------------------------------------------------------------
MIT License
Copyright (c) 2017-2018 Mat Ryer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================================


@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019 acomagu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,52 +0,0 @@
# bufpipe: Buffered Pipe
[![CircleCI](https://img.shields.io/circleci/build/github/acomagu/bufpipe.svg?style=flat-square)](https://circleci.com/gh/acomagu/bufpipe) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/acomagu/bufpipe)
The buffered version of io.Pipe. It's safe for concurrent use.
## How does it differ from io.Pipe?
Writes never block because the pipe has variable-sized buffer.
```Go
r, w := bufpipe.New(nil)
io.WriteString(w, "abc") // No blocking.
io.WriteString(w, "def") // No blocking, too.
w.Close()
io.Copy(os.Stdout, r)
// Output: abcdef
```
[Playground](https://play.golang.org/p/PdyBAS3pVob)
## How does it differ from bytes.Buffer?
Reads block if the internal buffer is empty until the writer is closed.
```Go
r, w := bufpipe.New(nil)
done := make(chan struct{})
go func() {
io.Copy(os.Stdout, r) // The reads block until the writer is closed.
done <- struct{}{}
}()
io.WriteString(w, "abc")
io.WriteString(w, "def")
w.Close()
<-done
// Output: abcdef
```
[Playground](https://play.golang.org/p/UppmyLeRgX6)
## Contribution
### Generate CREDITS
The [CREDITS](./CREDITS) file are generated by [gocredits](https://github.com/Songmu/gocredits). Update it when the dependencies are changed.
```
$ gocredits > CREDITS
```


@ -1,129 +0,0 @@
package bufpipe
import (
"bytes"
"errors"
"io"
"sync"
)
// ErrClosedPipe is the error used for read or write operations on a closed pipe.
var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe")
type pipe struct {
cond *sync.Cond
buf *bytes.Buffer
rerr, werr error
}
// A PipeReader is the read half of a pipe.
type PipeReader struct {
*pipe
}
// A PipeWriter is the write half of a pipe.
type PipeWriter struct {
*pipe
}
// New creates a synchronous pipe using buf as its initial contents. It can be
// used to connect code expecting an io.Reader with code expecting an io.Writer.
//
// Unlike io.Pipe, writes never block because the internal buffer has variable
// size. Reads block only when the buffer is empty.
//
// It is safe to call Read and Write in parallel with each other or with Close.
// Parallel calls to Read and parallel calls to Write are also safe: the
// individual calls will be gated sequentially.
//
// The new pipe takes ownership of buf, and the caller should not use buf after
// this call. New is intended to prepare a PipeReader to read existing data. It
// can also be used to set the initial size of the internal buffer for writing.
// To do that, buf should have the desired capacity but a length of zero.
func New(buf []byte) (*PipeReader, *PipeWriter) {
p := &pipe{
buf: bytes.NewBuffer(buf),
cond: sync.NewCond(new(sync.Mutex)),
}
return &PipeReader{
pipe: p,
}, &PipeWriter{
pipe: p,
}
}
// Read implements the standard Read interface: it reads data from the pipe,
// reading from the internal buffer, otherwise blocking until a writer arrives
// or the write end is closed. If the write end is closed with an error, that
// error is returned as err; otherwise err is io.EOF.
func (r *PipeReader) Read(data []byte) (int, error) {
r.cond.L.Lock()
defer r.cond.L.Unlock()
RETRY:
n, err := r.buf.Read(data)
// If not closed and no read, wait for writing.
if err == io.EOF && r.rerr == nil && n == 0 {
r.cond.Wait()
goto RETRY
}
if err == io.EOF {
return n, r.rerr
}
return n, err
}
// Close closes the reader; subsequent writes from the write half of the pipe
// will return error ErrClosedPipe.
func (r *PipeReader) Close() error {
return r.CloseWithError(nil)
}
// CloseWithError closes the reader; subsequent writes to the write half of the
// pipe will return the error err.
func (r *PipeReader) CloseWithError(err error) error {
r.cond.L.Lock()
defer r.cond.L.Unlock()
if err == nil {
err = ErrClosedPipe
}
r.werr = err
return nil
}
// Write implements the standard Write interface: it writes data to the internal
// buffer. If the read end is closed with an error, that err is returned as err;
// otherwise err is ErrClosedPipe.
func (w *PipeWriter) Write(data []byte) (int, error) {
w.cond.L.Lock()
defer w.cond.L.Unlock()
if w.werr != nil {
return 0, w.werr
}
n, err := w.buf.Write(data)
w.cond.Signal()
return n, err
}
// Close closes the writer; subsequent reads from the read half of the pipe will
// return io.EOF once the internal buffer get empty.
func (w *PipeWriter) Close() error {
return w.CloseWithError(nil)
}
// Close closes the writer; subsequent reads from the read half of the pipe will
// return err once the internal buffer get empty.
func (w *PipeWriter) CloseWithError(err error) error {
w.cond.L.Lock()
defer w.cond.L.Unlock()
if err == nil {
err = io.EOF
}
w.rerr = err
w.cond.Broadcast()
return nil
}


@ -1,2 +0,0 @@
// Package bufpipe provides a IO pipe, has variable-sized buffer.
package bufpipe


@@ -109,10 +109,10 @@ compatibility status with go-git.
 ## Server admin

 | Feature              | Sub-feature | Status | Notes | Examples |
-| -------------------- | ----------- | ------ | ----- | -------- |
+| -------------------- | ----------- | ------ | ----- | ----------------------------------------- |
 | `daemon`             |             | ❌     |       |          |
-| `update-server-info` |             | ❌     |       |          |
+| `update-server-info` |             | ✅     |       | [cli](./cli/go-git/update_server_info.go) |

 ## Advanced


@@ -17,8 +17,11 @@ const (
     s = 16

     // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
-    // Max size of a copy operation (64KB)
+    // Max size of a copy operation (64KB).
     maxCopySize = 64 * 1024
+
+    // Min size of a copy operation.
+    minCopySize = 4
 )

 // GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object,


@ -3,6 +3,7 @@ package packfile
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"io" "io"
"github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing"
@ -174,13 +175,25 @@ func (p *Parser) init() error {
return nil return nil
} }
type objectHeaderWriter func(typ plumbing.ObjectType, sz int64) error
type lazyObjectWriter interface {
// LazyWriter enables an object to be lazily written.
// It returns:
// - w: a writer to receive the object's content.
// - lwh: a func to write the object header.
// - err: any error from the initial writer creation process.
//
// Note that if the object header is not written BEFORE the writer
// is used, this will result in an invalid object.
LazyWriter() (w io.WriteCloser, lwh objectHeaderWriter, err error)
}
func (p *Parser) indexObjects() error { func (p *Parser) indexObjects() error {
buf := sync.GetBytesBuffer() buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf) defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ { for i := uint32(0); i < p.count; i++ {
buf.Reset()
oh, err := p.scanner.NextObjectHeader() oh, err := p.scanner.NextObjectHeader()
if err != nil { if err != nil {
return err return err
@ -220,21 +233,60 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t) ota = newBaseObject(oh.Offset, oh.Length, t)
} }
buf.Grow(int(oh.Length)) hasher := plumbing.NewHasher(oh.Type, oh.Length)
_, crc, err := p.scanner.NextObject(buf) writers := []io.Writer{hasher}
var obj *plumbing.MemoryObject
// Lazy writing is only available for non-delta objects.
if p.storage != nil && !delta {
// When a storage is set and supports lazy writing,
// use that instead of creating a memory object.
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, lwh, err := low.LazyWriter()
if err != nil {
return err
}
if err = lwh(oh.Type, oh.Length); err != nil {
return err
}
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
writers = append(writers, obj)
}
}
if delta && !p.scanner.IsSeekable {
buf.Reset()
buf.Grow(int(oh.Length))
writers = append(writers, buf)
}
mw := io.MultiWriter(writers...)
_, crc, err := p.scanner.NextObject(mw)
if err != nil { if err != nil {
return err return err
} }
// Non delta objects needs to be added into the storage. This
// is only required when lazy writing is not supported.
if obj != nil {
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
ota.Crc32 = crc ota.Crc32 = crc
ota.Length = oh.Length ota.Length = oh.Length
data := buf.Bytes()
if !delta { if !delta {
sha1, err := getSHA1(ota.Type, data) sha1 := hasher.Sum()
if err != nil {
return err
}
// Move children of placeholder parent into actual parent, in case this // Move children of placeholder parent into actual parent, in case this
// was a non-external delta reference. // was a non-external delta reference.
@ -249,20 +301,8 @@ func (p *Parser) indexObjects() error {
p.oiByHash[ota.SHA1] = ota p.oiByHash[ota.SHA1] = ota
} }
if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
if delta && !p.scanner.IsSeekable { if delta && !p.scanner.IsSeekable {
data := buf.Bytes()
p.deltas[oh.Offset] = make([]byte, len(data)) p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data) copy(p.deltas[oh.Offset], data)
} }
@ -280,23 +320,29 @@ func (p *Parser) resolveDeltas() error {
for _, obj := range p.oi { for _, obj := range p.oi {
buf.Reset() buf.Reset()
buf.Grow(int(obj.Length))
err := p.get(obj, buf) err := p.get(obj, buf)
if err != nil { if err != nil {
return err return err
} }
content := buf.Bytes()
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err return err
} }
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, nil); err != nil {
return err return err
} }
if !obj.IsDelta() && len(obj.Children) > 0 { if !obj.IsDelta() && len(obj.Children) > 0 {
// Dealing with an io.ReaderAt object, means we can
// create it once and reuse across all children.
r := bytes.NewReader(buf.Bytes())
for _, child := range obj.Children { for _, child := range obj.Children {
if err := p.resolveObject(io.Discard, child, content); err != nil { // Even though we are discarding the output, we still need to read it to
// so that the scanner can advance to the next object, and the SHA1 can be
// calculated.
if err := p.resolveObject(io.Discard, child, r); err != nil {
return err return err
} }
p.resolveExternalRef(child) p.resolveExternalRef(child)
@ -361,13 +407,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if o.DiskType.IsDelta() { if o.DiskType.IsDelta() {
b := sync.GetBytesBuffer() b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b) defer sync.PutBytesBuffer(b)
buf.Grow(int(o.Length))
err := p.get(o.Parent, b) err := p.get(o.Parent, b)
if err != nil { if err != nil {
return err return err
} }
base := b.Bytes()
err = p.resolveObject(buf, o, base) err = p.resolveObject(buf, o, bytes.NewReader(b.Bytes()))
if err != nil { if err != nil {
return err return err
} }
@ -378,6 +424,13 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
} }
} }
// If the scanner is seekable, caching this data into
// memory by offset seems wasteful.
// There is a trade-off to be considered here in terms
// of execution time vs memory consumption.
//
// TODO: improve seekable execution time, so that we can
// skip this cache.
if len(o.Children) > 0 { if len(o.Children) > 0 {
data := make([]byte, buf.Len()) data := make([]byte, buf.Len())
copy(data, buf.Bytes()) copy(data, buf.Bytes())
@ -386,10 +439,25 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
return nil return nil
} }
// resolveObject resolves an object from base, using information
// provided by o.
//
// This call has the side-effect of changing field values
// from the object info o:
// - Type: OFSDeltaObject may become the target type (e.g. Blob).
// - Size: The size may be update with the target size.
// - Hash: Zero hashes will be calculated as part of the object
// resolution. Hence why this process can't be avoided even when w
// is an io.Discard.
//
// base must be an io.ReaderAt, which is a requirement from
// patchDeltaStream. The main reason being that reversing an
// delta object may lead to going backs and forths within base,
// which is not supported by io.Reader.
func (p *Parser) resolveObject( func (p *Parser) resolveObject(
w io.Writer, w io.Writer,
o *objectInfo, o *objectInfo,
base []byte, base io.ReaderAt,
) error { ) error {
if !o.DiskType.IsDelta() { if !o.DiskType.IsDelta() {
return nil return nil
@ -400,26 +468,46 @@ func (p *Parser) resolveObject(
if err != nil { if err != nil {
return err return err
} }
data := buf.Bytes()
data, err = applyPatchBase(o, data, base) writers := []io.Writer{w}
var obj *plumbing.MemoryObject
var lwh objectHeaderWriter
if p.storage != nil {
if low, ok := p.storage.(lazyObjectWriter); ok {
ow, wh, err := low.LazyWriter()
if err != nil {
return err
}
lwh = wh
defer ow.Close()
writers = append(writers, ow)
} else {
obj = new(plumbing.MemoryObject)
ow, err := obj.Writer()
if err != nil {
return err
}
writers = append(writers, ow)
}
}
mw := io.MultiWriter(writers...)
err = applyPatchBase(o, base, buf, mw, lwh)
if err != nil { if err != nil {
return err return err
} }
if p.storage != nil { if obj != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
obj.SetType(o.Type) obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil { obj.SetSize(o.Size()) // Size here is correct as it was populated by applyPatchBase.
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil { if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err return err
} }
} }
_, err = w.Write(data)
return err return err
} }
@ -443,24 +531,31 @@ func (p *Parser) readData(w io.Writer, o *objectInfo) error {
return nil return nil
} }
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { // applyPatchBase applies the patch to target.
patched, err := PatchDelta(base, data) //
// Note that ota will be updated based on the description in resolveObject.
func applyPatchBase(ota *objectInfo, base io.ReaderAt, delta io.Reader, target io.Writer, wh objectHeaderWriter) error {
if target == nil {
return fmt.Errorf("cannot apply patch against nil target")
}
typ := ota.Type
if ota.SHA1 == plumbing.ZeroHash {
typ = ota.Parent.Type
}
sz, h, err := patchDeltaWriter(target, base, delta, typ, wh)
if err != nil { if err != nil {
return nil, err return err
} }
if ota.SHA1 == plumbing.ZeroHash { if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type ota.Type = typ
sha1, err := getSHA1(ota.Type, patched) ota.Length = int64(sz)
if err != nil { ota.SHA1 = h
return nil, err
}
ota.SHA1 = sha1
ota.Length = int64(len(patched))
} }
return patched, nil return nil
} }
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {


@ -4,6 +4,7 @@ import (
"bufio" "bufio"
"bytes" "bytes"
"errors" "errors"
"fmt"
"io" "io"
"math" "math"
@ -17,7 +18,33 @@ import (
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js // and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format. // for details about the delta format.
const deltaSizeMin = 4 var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
type offset struct {
mask byte
shift uint
}
var offsets = []offset{
{mask: 0x01, shift: 0},
{mask: 0x02, shift: 8},
{mask: 0x04, shift: 16},
{mask: 0x08, shift: 24},
}
var sizes = []offset{
{mask: 0x10, shift: 0},
{mask: 0x20, shift: 8},
{mask: 0x40, shift: 16},
}
// ApplyDelta writes to target the result of applying the modification deltas in delta to base. // ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
@ -58,11 +85,6 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
return err return err
} }
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
// PatchDelta returns the result of applying the modification deltas in delta to src. // PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command // An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
// is not copy from source or copy from delta (ErrDeltaCmd). // is not copy from source or copy from delta (ErrDeltaCmd).
@ -120,7 +142,8 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
return return
} }
if isCopyFromSrc(cmd) { switch {
case isCopyFromSrc(cmd):
offset, err := decodeOffsetByteReader(cmd, deltaBuf) offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil { if err != nil {
_ = dstWr.CloseWithError(err) _ = dstWr.CloseWithError(err)
@ -173,7 +196,8 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
} }
remainingTargetSz -= sz remainingTargetSz -= sz
basePos += sz basePos += sz
} else if isCopyFromDelta(cmd) {
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) { if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta) _ = dstWr.CloseWithError(ErrInvalidDelta)
@ -185,10 +209,12 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
} }
remainingTargetSz -= sz remainingTargetSz -= sz
} else {
default:
_ = dstWr.CloseWithError(ErrDeltaCmd) _ = dstWr.CloseWithError(ErrDeltaCmd)
return return
} }
if remainingTargetSz <= 0 { if remainingTargetSz <= 0 {
_ = dstWr.Close() _ = dstWr.Close()
return return
@ -200,7 +226,7 @@ func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadClo
} }
func patchDelta(dst *bytes.Buffer, src, delta []byte) error { func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin { if len(delta) < minCopySize {
return ErrInvalidDelta return ErrInvalidDelta
} }
@ -221,7 +247,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
cmd = delta[0] cmd = delta[0]
delta = delta[1:] delta = delta[1:]
if isCopyFromSrc(cmd) {
switch {
case isCopyFromSrc(cmd):
var offset, sz uint var offset, sz uint
var err error var err error
offset, delta, err = decodeOffset(cmd, delta) offset, delta, err = decodeOffset(cmd, delta)
@ -240,7 +268,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
} }
dst.Write(src[offset : offset+sz]) dst.Write(src[offset : offset+sz])
remainingTargetSz -= sz remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
case isCopyFromDelta(cmd):
sz := uint(cmd) // cmd is the size itself sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) { if invalidSize(sz, targetSz) {
return ErrInvalidDelta return ErrInvalidDelta
@ -253,7 +282,8 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
dst.Write(delta[0:sz]) dst.Write(delta[0:sz])
remainingTargetSz -= sz remainingTargetSz -= sz
delta = delta[sz:] delta = delta[sz:]
} else {
default:
return ErrDeltaCmd return ErrDeltaCmd
} }
@ -265,6 +295,107 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
return nil return nil
} }
func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
typ plumbing.ObjectType, writeHeader objectHeaderWriter) (uint, plumbing.Hash, error) {
deltaBuf := bufio.NewReaderSize(delta, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
if r, ok := base.(*bytes.Reader); ok && srcSz != uint(r.Size()) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
return 0, plumbing.ZeroHash, err
}
// If header still needs to be written, caller will provide
// a LazyObjectWriterHeader. This seems to be the case when
// dealing with thin-packs.
if writeHeader != nil {
err = writeHeader(typ, int64(targetSz))
if err != nil {
return 0, plumbing.ZeroHash, fmt.Errorf("could not lazy write header: %w", err)
}
}
remainingTargetSz := targetSz
hasher := plumbing.NewHasher(typ, int64(targetSz))
mw := io.MultiWriter(dst, hasher)
bufp := sync.GetByteSlice()
defer sync.PutByteSlice(bufp)
sr := io.NewSectionReader(base, int64(0), int64(srcSz))
// Keep both the io.LimitedReader types, so we can reset N.
baselr := io.LimitReader(sr, 0).(*io.LimitedReader)
deltalr := io.LimitReader(deltaBuf, 0).(*io.LimitedReader)
for {
buf := *bufp
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
if err != nil {
return 0, plumbing.ZeroHash, err
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
return 0, plumbing.ZeroHash, err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
return 0, plumbing.ZeroHash, err
}
if _, err := sr.Seek(int64(offset), io.SeekStart); err != nil {
return 0, plumbing.ZeroHash, err
}
baselr.N = int64(sz)
if _, err := io.CopyBuffer(mw, baselr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return 0, plumbing.ZeroHash, ErrInvalidDelta
}
deltalr.N = int64(sz)
if _, err := io.CopyBuffer(mw, deltalr, buf); err != nil {
return 0, plumbing.ZeroHash, err
}
remainingTargetSz -= sz
} else {
return 0, plumbing.ZeroHash, err
}
if remainingTargetSz <= 0 {
break
}
}
return targetSz, hasher.Sum(), nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some // Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the // binary data and returns the decoded number and the rest of the
// stream. // stream.
@ -306,48 +437,24 @@ func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
return num, nil return num, nil
} }
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
func isCopyFromSrc(cmd byte) bool { func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0 return (cmd & continuation) != 0
} }
func isCopyFromDelta(cmd byte) bool { func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0 return (cmd&continuation) == 0 && cmd != 0
} }
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) { func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint var offset uint
if (cmd & 0x01) != 0 { for _, o := range offsets {
next, err := delta.ReadByte() if (cmd & o.mask) != 0 {
if err != nil { next, err := delta.ReadByte()
return 0, err if err != nil {
return 0, err
}
offset |= uint(next) << o.shift
} }
offset = uint(next)
}
if (cmd & 0x02) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 8
}
if (cmd & 0x04) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 16
}
if (cmd & 0x08) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 24
} }
return offset, nil return offset, nil
@ -355,33 +462,14 @@ func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint var offset uint
if (cmd & 0x01) != 0 { for _, o := range offsets {
if len(delta) == 0 { if (cmd & o.mask) != 0 {
return 0, nil, ErrInvalidDelta if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << o.shift
delta = delta[1:]
} }
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
} }
return offset, delta, nil return offset, delta, nil
@ -389,29 +477,18 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) { func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint var sz uint
if (cmd & 0x10) != 0 { for _, s := range sizes {
next, err := delta.ReadByte() if (cmd & s.mask) != 0 {
if err != nil { next, err := delta.ReadByte()
return 0, err if err != nil {
return 0, err
}
sz |= uint(next) << s.shift
} }
sz = uint(next)
}
if (cmd & 0x20) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 8
}
if (cmd & 0x40) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 16
} }
if sz == 0 { if sz == 0 {
sz = 0x10000 sz = maxCopySize
} }
return sz, nil return sz, nil
@ -419,29 +496,17 @@ func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint var sz uint
if (cmd & 0x10) != 0 { for _, s := range sizes {
if len(delta) == 0 { if (cmd & s.mask) != 0 {
return 0, nil, ErrInvalidDelta if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << s.shift
delta = delta[1:]
} }
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
} }
if sz == 0 { if sz == 0 {
sz = 0x10000 sz = maxCopySize
} }
return sz, delta, nil return sz, delta, nil


@@ -7,6 +7,8 @@ import (
     "errors"
     "fmt"
     "io"
+
+    "github.com/go-git/go-git/v5/utils/trace"
 )

 // An Encoder writes pkt-lines to an output stream.
@@ -43,6 +45,7 @@ func NewEncoder(w io.Writer) *Encoder {
 // Flush encodes a flush-pkt to the output stream.
 func (e *Encoder) Flush() error {
+    defer trace.Packet.Print("packet: > 0000")
     _, err := e.w.Write(FlushPkt)
     return err
 }
@@ -70,6 +73,7 @@ func (e *Encoder) encodeLine(p []byte) error {
     }

     n := len(p) + 4
+    defer trace.Packet.Printf("packet: > %04x %s", n, p)
     if _, err := e.w.Write(asciiHex16(n)); err != nil {
         return err
     }


@ -0,0 +1,51 @@
package pktline
import (
"bytes"
"errors"
"io"
"strings"
)
var (
// ErrInvalidErrorLine is returned by Decode when the packet line is not an
// error line.
ErrInvalidErrorLine = errors.New("expected an error-line")
errPrefix = []byte("ERR ")
)
// ErrorLine is a packet line that contains an error message.
// Once this packet is sent by client or server, the data transfer process is
// terminated.
// See https://git-scm.com/docs/pack-protocol#_pkt_line_format
type ErrorLine struct {
Text string
}
// Error implements the error interface.
func (e *ErrorLine) Error() string {
return e.Text
}
// Encode encodes the ErrorLine into a packet line.
func (e *ErrorLine) Encode(w io.Writer) error {
p := NewEncoder(w)
return p.Encodef("%s%s\n", string(errPrefix), e.Text)
}
// Decode decodes a packet line into an ErrorLine.
func (e *ErrorLine) Decode(r io.Reader) error {
s := NewScanner(r)
if !s.Scan() {
return s.Err()
}
line := s.Bytes()
if !bytes.HasPrefix(line, errPrefix) {
return ErrInvalidErrorLine
}
e.Text = strings.TrimSpace(string(line[4:]))
return nil
}
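
For orientation, a small round-trip sketch of the new ErrorLine type (package path as declared above; the error text is illustrative and assumes go-git v5.10.1 as vendored in this change):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer

	// Encode writes the message as a pkt-line payload prefixed with "ERR ".
	if err := (&pktline.ErrorLine{Text: "Repository not found."}).Encode(&buf); err != nil {
		panic(err)
	}

	// Decode scans one pkt-line back and strips the "ERR " prefix.
	var got pktline.ErrorLine
	if err := got.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(got.Text) // Repository not found.
}
```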


@@ -1,8 +1,12 @@
 package pktline

 import (
+    "bytes"
     "errors"
     "io"
+    "strings"
+
+    "github.com/go-git/go-git/v5/utils/trace"
 )

 const (
@@ -65,6 +69,14 @@ func (s *Scanner) Scan() bool {
         return false
     }
     s.payload = s.payload[:l]
+    trace.Packet.Printf("packet: < %04x %s", l, s.payload)
+
+    if bytes.HasPrefix(s.payload, errPrefix) {
+        s.err = &ErrorLine{
+            Text: strings.TrimSpace(string(s.payload[4:])),
+        }
+        return false
+    }

     return true
 }


@@ -48,6 +48,11 @@ func isFlush(payload []byte) bool {
     return len(payload) == 0
 }

+var (
+    // ErrNilWriter is returned when a nil writer is passed to the encoder.
+    ErrNilWriter = fmt.Errorf("nil writer")
+)
+
 // ErrUnexpectedData represents an unexpected data decoding a message
 type ErrUnexpectedData struct {
     Msg string


@ -0,0 +1,120 @@
package packp
import (
"fmt"
"io"
"strings"
"github.com/go-git/go-git/v5/plumbing/format/pktline"
)
var (
// ErrInvalidGitProtoRequest is returned by Decode if the input is not a
// valid git protocol request.
ErrInvalidGitProtoRequest = fmt.Errorf("invalid git protocol request")
)
// GitProtoRequest is a command request for the git protocol.
// It is used to send the command, endpoint, and extra parameters to the
// remote.
// See https://git-scm.com/docs/pack-protocol#_git_transport
type GitProtoRequest struct {
RequestCommand string
Pathname string
// Optional
Host string
// Optional
ExtraParams []string
}
// validate validates the request.
func (g *GitProtoRequest) validate() error {
if g.RequestCommand == "" {
return fmt.Errorf("%w: empty request command", ErrInvalidGitProtoRequest)
}
if g.Pathname == "" {
return fmt.Errorf("%w: empty pathname", ErrInvalidGitProtoRequest)
}
return nil
}
// Encode encodes the request into the writer.
func (g *GitProtoRequest) Encode(w io.Writer) error {
if w == nil {
return ErrNilWriter
}
if err := g.validate(); err != nil {
return err
}
p := pktline.NewEncoder(w)
req := fmt.Sprintf("%s %s\x00", g.RequestCommand, g.Pathname)
if host := g.Host; host != "" {
req += fmt.Sprintf("host=%s\x00", host)
}
if len(g.ExtraParams) > 0 {
req += "\x00"
for _, param := range g.ExtraParams {
req += param + "\x00"
}
}
if err := p.Encode([]byte(req)); err != nil {
return err
}
return nil
}
// Decode decodes the request from the reader.
func (g *GitProtoRequest) Decode(r io.Reader) error {
s := pktline.NewScanner(r)
if !s.Scan() {
err := s.Err()
if err == nil {
return ErrInvalidGitProtoRequest
}
return err
}
line := string(s.Bytes())
if len(line) == 0 {
return io.EOF
}
if line[len(line)-1] != 0 {
return fmt.Errorf("%w: missing null terminator", ErrInvalidGitProtoRequest)
}
parts := strings.SplitN(line, " ", 2)
if len(parts) != 2 {
return fmt.Errorf("%w: short request", ErrInvalidGitProtoRequest)
}
g.RequestCommand = parts[0]
params := strings.Split(parts[1], string(null))
if len(params) < 1 {
return fmt.Errorf("%w: missing pathname", ErrInvalidGitProtoRequest)
}
g.Pathname = params[0]
if len(params) > 1 {
g.Host = strings.TrimPrefix(params[1], "host=")
}
if len(params) > 2 {
for _, param := range params[2:] {
if param != "" {
g.ExtraParams = append(g.ExtraParams, param)
}
}
}
return nil
}
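
A brief sketch of how this request type can be used directly (values are illustrative; the packp import path matches the one added to the git:// transport later in this change):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	req := packp.GitProtoRequest{
		RequestCommand: "git-upload-pack",
		Pathname:       "/project.git",
		Host:           "example.com",
	}

	var buf bytes.Buffer
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}

	// The payload "git-upload-pack /project.git\x00host=example.com\x00"
	// is framed as a single pkt-line with a 4-digit hex length prefix.
	fmt.Printf("%q\n", buf.String())
}
```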


@@ -11,7 +11,6 @@ import (
     "github.com/go-git/go-git/v5/plumbing/transport"
     "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
-    "github.com/go-git/go-git/v5/utils/ioutil"

     "golang.org/x/sys/execabs"
 )
@@ -112,7 +111,7 @@ func (c *command) Start() error {
 func (c *command) StderrPipe() (io.Reader, error) {
     // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
     // We use an io.Pipe and close it after the command finishes.
-    r, w := ioutil.Pipe()
+    r, w := io.Pipe()
     c.cmd.Stderr = w
     c.stderrCloser = r
     return r, nil


@@ -2,12 +2,11 @@
 package git

 import (
-    "fmt"
     "io"
     "net"
     "strconv"

-    "github.com/go-git/go-git/v5/plumbing/format/pktline"
+    "github.com/go-git/go-git/v5/plumbing/protocol/packp"
     "github.com/go-git/go-git/v5/plumbing/transport"
     "github.com/go-git/go-git/v5/plumbing/transport/internal/common"
     "github.com/go-git/go-git/v5/utils/ioutil"
@@ -42,10 +41,18 @@ type command struct {

 // Start executes the command sending the required message to the TCP connection
 func (c *command) Start() error {
-    cmd := endpointToCommand(c.command, c.endpoint)
+    req := packp.GitProtoRequest{
+        RequestCommand: c.command,
+        Pathname:       c.endpoint.Path,
+    }
+
+    host := c.endpoint.Host
+    if c.endpoint.Port != DefaultPort {
+        host = net.JoinHostPort(c.endpoint.Host, strconv.Itoa(c.endpoint.Port))
+    }

-    e := pktline.NewEncoder(c.conn)
-    return e.Encode([]byte(cmd))
+    req.Host = host
+
+    return req.Encode(c.conn)
 }

 func (c *command) connect() error {
@@ -90,15 +97,6 @@ func (c *command) StdoutPipe() (io.Reader, error) {
     return c.conn, nil
 }

-func endpointToCommand(cmd string, ep *transport.Endpoint) string {
-    host := ep.Host
-    if ep.Port != DefaultPort {
-        host = net.JoinHostPort(ep.Host, strconv.Itoa(ep.Port))
-    }
-
-    return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
-}
-
 // Close closes the TCP connection and connection.
 func (c *command) Close() error {
     if !c.connected {


@@ -203,9 +203,22 @@ func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRe
 }

 func (s *session) handleAdvRefDecodeError(err error) error {
+    var errLine *pktline.ErrorLine
+    if errors.As(err, &errLine) {
+        if isRepoNotFoundError(errLine.Text) {
+            return transport.ErrRepositoryNotFound
+        }
+
+        return errLine
+    }
+
     // If repository is not found, we get empty stdout and server writes an
     // error to stderr.
-    if err == packp.ErrEmptyInput {
+    if errors.Is(err, packp.ErrEmptyInput) {
+        // TODO:(v6): handle this error in a better way.
+        // Instead of checking the stderr output for a specific error message,
+        // define an ExitError and embed the stderr output and exit (if one
+        // exists) in the error struct. Just like exec.ExitError.
         s.finished = true
         if err := s.checkNotFoundError(); err != nil {
             return err
@@ -245,6 +258,12 @@ func (s *session) handleAdvRefDecodeError(err error) error {
 // returned with the packfile content. The reader must be closed after reading.
 func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) {
     if req.IsEmpty() {
+        // XXX: IsEmpty means haves are a subset of wants, in that case we have
+        // everything we asked for. Close the connection and return nil.
+        if err := s.finish(); err != nil {
+            return nil, err
+        }
+        // TODO:(v6) return nil here
         return nil, transport.ErrEmptyUploadPackRequest
     }
@@ -393,59 +412,43 @@ func (s *session) checkNotFoundError() error {
             return transport.ErrRepositoryNotFound
         }

+        // TODO:(v6): return server error just as it is without a prefix
         return fmt.Errorf("unknown error: %s", line)
     }
 }

-var (
-    githubRepoNotFoundErr      = "ERROR: Repository not found."
-    bitbucketRepoNotFoundErr   = "conq: repository does not exist."
+const (
+    githubRepoNotFoundErr      = "Repository not found."
+    bitbucketRepoNotFoundErr   = "repository does not exist."
     localRepoNotFoundErr       = "does not appear to be a git repository"
-    gitProtocolNotFoundErr     = "ERR \n Repository not found."
-    gitProtocolNoSuchErr       = "ERR no such repository"
-    gitProtocolAccessDeniedErr = "ERR access denied"
-    gogsAccessDeniedErr        = "Gogs: Repository does not exist or you do not have access"
-    gitlabRepoNotFoundErr      = "remote: ERROR: The project you were looking for could not be found"
+    gitProtocolNotFoundErr     = "Repository not found."
+    gitProtocolNoSuchErr       = "no such repository"
+    gitProtocolAccessDeniedErr = "access denied"
+    gogsAccessDeniedErr        = "Repository does not exist or you do not have access"
+    gitlabRepoNotFoundErr      = "The project you were looking for could not be found"
 )

 func isRepoNotFoundError(s string) bool {
-    if strings.HasPrefix(s, githubRepoNotFoundErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, bitbucketRepoNotFoundErr) {
-        return true
-    }
-
-    if strings.HasSuffix(s, localRepoNotFoundErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, gitProtocolNotFoundErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, gitProtocolNoSuchErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, gitProtocolAccessDeniedErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, gogsAccessDeniedErr) {
-        return true
-    }
-
-    if strings.HasPrefix(s, gitlabRepoNotFoundErr) {
-        return true
+    for _, err := range []string{
+        githubRepoNotFoundErr,
+        bitbucketRepoNotFoundErr,
+        localRepoNotFoundErr,
+        gitProtocolNotFoundErr,
+        gitProtocolNoSuchErr,
+        gitProtocolAccessDeniedErr,
+        gogsAccessDeniedErr,
+        gitlabRepoNotFoundErr,
+    } {
+        if strings.Contains(s, err) {
+            return true
+        }
     }

     return false
 }

 // uploadPack implements the git-upload-pack protocol.
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
+func uploadPack(w io.WriteCloser, _ io.Reader, req *packp.UploadPackRequest) error {
     // TODO support multi_ack mode
     // TODO support multi_ack_detailed mode
     // TODO support acks for common objects


@@ -166,7 +166,7 @@ func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest
         return nil, err
     }

-    pr, pw := ioutil.Pipe()
+    pr, pw := io.Pipe()
     e := packfile.NewEncoder(pw, s.storer, false)
     go func() {
         // TODO: plumb through a pack window.


@@ -168,7 +168,7 @@ func dial(network, addr string, proxyOpts transport.ProxyOptions, config *ssh.Cl
     defer cancel()

     var conn net.Conn
-    var err error
+    var dialErr error

     if proxyOpts.URL != "" {
         proxyUrl, err := proxyOpts.FullURL()
@@ -186,12 +186,12 @@
             return nil, fmt.Errorf("expected ssh proxy dialer to be of type %s; got %s",
                 reflect.TypeOf(ctxDialer), reflect.TypeOf(dialer))
         }
-        conn, err = ctxDialer.DialContext(ctx, "tcp", addr)
+        conn, dialErr = ctxDialer.DialContext(ctx, "tcp", addr)
     } else {
-        conn, err = proxy.Dial(ctx, network, addr)
+        conn, dialErr = proxy.Dial(ctx, network, addr)
     }
-    if err != nil {
-        return nil, err
+    if dialErr != nil {
+        return nil, dialErr
     }

     c, chans, reqs, err := ssh.NewClientConn(conn, addr, config)


@@ -552,6 +552,10 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
     reader, err := s.UploadPack(ctx, req)
     if err != nil {
+        if errors.Is(err, transport.ErrEmptyUploadPackRequest) {
+            // XXX: no packfile provided, everything is up-to-date.
+            return nil
+        }
         return err
     }
@@ -1198,9 +1202,9 @@ func (r *Remote) updateLocalReferenceStorage(
         old, _ := storer.ResolveReference(r.s, localName)
         new := plumbing.NewHashReference(localName, ref.Hash())

-        // If the ref exists locally as a branch and force is not specified,
-        // only update if the new ref is an ancestor of the old
-        if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() {
+        // If the ref exists locally as a non-tag and force is not
+        // specified, only update if the new ref is an ancestor of the old
+        if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() {
             ff, err := isFastForward(r.s, old.Hash(), new.Hash())
             if err != nil {
                 return updated, err
@@ -1387,7 +1391,7 @@ func pushHashes(
     allDelete bool,
 ) (*packp.ReportStatus, error) {
-    rd, wr := ioutil.Pipe()
+    rd, wr := io.Pipe()

     config, err := s.Config()
     if err != nil {


@@ -146,6 +146,19 @@ func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.H
     return o.Hash(), err
 }

+// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file.
+// It first write the header passing on the object type and size, so
+// that the object contents can be written later, without the need to
+// create a MemoryObject and buffering its entire contents into memory.
+func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) {
+    ow, err := s.dir.NewObject()
+    if err != nil {
+        return nil, nil, err
+    }
+
+    return ow, ow.WriteHeader, nil
+}
+
 // HasEncodedObject returns nil if the object exists, without actually
 // reading the object data from storage.
 func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
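
A minimal sketch of how a caller could stream an object through the new LazyWriter instead of buffering it in a MemoryObject. The writeBlobLazily helper and its package are hypothetical; filesystem.ObjectStorage and plumbing.BlobObject are existing go-git identifiers:

```go
package storageutil

import (
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// writeBlobLazily streams size bytes from r into the object database
// without building an in-memory copy of the object first.
func writeBlobLazily(s *filesystem.ObjectStorage, r io.Reader, size int64) error {
	w, writeHeader, err := s.LazyWriter()
	if err != nil {
		return err
	}
	defer w.Close()

	// The object header (type and size) must be written before any content,
	// otherwise the resulting loose object is invalid.
	if err := writeHeader(plumbing.BlobObject, size); err != nil {
		return err
	}

	_, err = io.Copy(w, r)
	return err
}
```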


@@ -195,7 +195,7 @@ func NewWriterOnError(w io.Writer, notify func(error)) io.Writer {
 }

 // NewWriteCloserOnError returns a io.WriteCloser that call the notify function
-//when an unexpected (!io.EOF) error happens, after call Write function.
+// when an unexpected (!io.EOF) error happens, after call Write function.
 func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser {
     return NewWriteCloser(NewWriterOnError(w, notify), w)
 }
@@ -208,13 +208,3 @@ func (r *writerOnError) Write(p []byte) (n int, err error) {
     return
 }
-
-type PipeReader interface {
-    io.ReadCloser
-    CloseWithError(err error) error
-}
-
-type PipeWriter interface {
-    io.WriteCloser
-    CloseWithError(err error) error
-}


@@ -1,9 +0,0 @@
-// +build !js
-
-package ioutil
-
-import "io"
-
-func Pipe() (PipeReader, PipeWriter) {
-    return io.Pipe()
-}


@@ -1,9 +0,0 @@
-// +build js
-
-package ioutil
-
-import "github.com/acomagu/bufpipe"
-
-func Pipe() (PipeReader, PipeWriter) {
-    return bufpipe.New(nil)
-}


@@ -103,6 +103,10 @@ func (n *node) calculateChildren() error {
             continue
         }
 
+        if file.Mode()&os.ModeSocket != 0 {
+            continue
+        }
+
         c, err := n.newChildNode(file)
         if err != nil {
             return err
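
The new check skips unix sockets when a worktree directory is walked, since sockets cannot be stored as blobs. A standalone sketch of the same filter over a directory listing; the directory path is a placeholder:

package main

import (
    "fmt"
    "os"
)

func main() {
    entries, err := os.ReadDir(".") // placeholder directory
    if err != nil {
        fmt.Println(err)
        return
    }
    for _, entry := range entries {
        info, err := entry.Info()
        if err != nil {
            continue
        }
        // Same test as the hunk above: skip anything that is a unix socket.
        if info.Mode()&os.ModeSocket != 0 {
            fmt.Println("skipping socket:", entry.Name())
            continue
        }
        fmt.Println("would hash:", entry.Name())
    }
}
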


@@ -0,0 +1,55 @@
+package trace
+
+import (
+    "fmt"
+    "log"
+    "os"
+    "sync/atomic"
+)
+
+var (
+    // logger is the logger to use for tracing.
+    logger = newLogger()
+
+    // current is the targets that are enabled for tracing.
+    current atomic.Int32
+)
+
+func newLogger() *log.Logger {
+    return log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+}
+
+// Target is a tracing target.
+type Target int32
+
+const (
+    // General traces general operations.
+    General Target = 1 << iota
+
+    // Packet traces git packets.
+    Packet
+)
+
+// SetTarget sets the tracing targets.
+func SetTarget(target Target) {
+    current.Store(int32(target))
+}
+
+// SetLogger sets the logger to use for tracing.
+func SetLogger(l *log.Logger) {
+    logger = l
+}
+
+// Print prints the given message only if the target is enabled.
+func (t Target) Print(args ...interface{}) {
+    if int32(t)&current.Load() != 0 {
+        logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck
+    }
+}
+
+// Printf prints the given message only if the target is enabled.
+func (t Target) Printf(format string, args ...interface{}) {
+    if int32(t)&current.Load() != 0 {
+        logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck
+    }
+}
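
The new utils/trace package is a small bitmask-gated logger. A sketch of how a consumer might switch it on; only SetTarget, SetLogger, and the Target constants from the file above are used, and the log prefix is arbitrary:

package main

import (
    "log"
    "os"

    "github.com/go-git/go-git/v5/utils/trace"
)

func main() {
    // Route trace output to stderr with an arbitrary prefix.
    trace.SetLogger(log.New(os.Stderr, "go-git: ", log.Ltime))

    // Enable both targets; the values are bit flags, so they can be OR-ed.
    trace.SetTarget(trace.General | trace.Packet)

    trace.General.Printf("fetching %s", "origin")
    trace.Packet.Print("0009done")

    // Disable everything again; subsequent prints become no-ops.
    trace.SetTarget(0)
    trace.General.Print("this is dropped")
}
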


@@ -5,6 +5,7 @@ package knownhosts
 import (
     "encoding/base64"
     "errors"
+    "fmt"
     "io"
     "net"
     "sort"
@@ -68,8 +69,19 @@ func (hkcb HostKeyCallback) HostKeys(hostWithPort string) (keys []ssh.PublicKey)
 // known_hosts entries (for different key types), the result will be sorted by
 // known_hosts filename and line number.
 func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []string) {
-    for _, key := range hkcb.HostKeys(hostWithPort) {
-        algos = append(algos, key.Type())
+    // We ensure that algos never contains duplicates. This is done for robustness
+    // even though currently golang.org/x/crypto/ssh/knownhosts never exposes
+    // multiple keys of the same type. This way our behavior here is unaffected
+    // even if https://github.com/golang/go/issues/28870 is implemented, for
+    // example by https://github.com/golang/crypto/pull/254.
+    hostKeys := hkcb.HostKeys(hostWithPort)
+    seen := make(map[string]struct{}, len(hostKeys))
+    for _, key := range hostKeys {
+        typ := key.Type()
+        if _, already := seen[typ]; !already {
+            algos = append(algos, typ)
+            seen[typ] = struct{}{}
+        }
     }
     return algos
 }
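
After this change HostKeyAlgorithms is guaranteed to be duplicate-free, even if several known_hosts lines carry the same key type for a host. A hedged usage sketch; the known_hosts path and host:port are placeholders, and knownhosts.New is the package's documented constructor:

package main

import (
    "fmt"

    "github.com/skeema/knownhosts"
)

func main() {
    // Placeholder path; point this at a real known_hosts file.
    kh, err := knownhosts.New("/home/user/.ssh/known_hosts")
    if err != nil {
        fmt.Println(err)
        return
    }

    // With the dedup above, each algorithm appears at most once.
    for _, algo := range kh.HostKeyAlgorithms("example.com:22") {
        fmt.Println(algo)
    }
}
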
@@ -140,11 +152,15 @@ func Line(addresses []string, key ssh.PublicKey) string {
 func WriteKnownHost(w io.Writer, hostname string, remote net.Addr, key ssh.PublicKey) error {
     // Always include hostname; only also include remote if it isn't a zero value
     // and doesn't normalize to the same string as hostname.
-    addresses := []string{hostname}
-    remoteStr := remote.String()
-    remoteStrNormalized := Normalize(remoteStr)
-    if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != Normalize(hostname) {
-        addresses = append(addresses, remoteStr)
+    hostnameNormalized := Normalize(hostname)
+    if strings.ContainsAny(hostnameNormalized, "\t ") {
+        return fmt.Errorf("knownhosts: hostname '%s' contains spaces", hostnameNormalized)
+    }
+    addresses := []string{hostnameNormalized}
+    remoteStrNormalized := Normalize(remote.String())
+    if remoteStrNormalized != "[0.0.0.0]:0" && remoteStrNormalized != hostnameNormalized &&
+        !strings.ContainsAny(remoteStrNormalized, "\t ") {
+        addresses = append(addresses, remoteStrNormalized)
     }
     line := Line(addresses, key) + "\n"
     _, err := w.Write([]byte(line))
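
WriteKnownHost now normalizes the hostname and rejects names containing spaces or tabs before emitting a line. A hedged sketch of appending an entry with it; appendKnownHost, the file path, and the address are hypothetical, and the key is whatever the SSH handshake handed back (for example inside an ssh.HostKeyCallback):

package main

import (
    "fmt"
    "net"
    "os"

    "github.com/skeema/knownhosts"
    "golang.org/x/crypto/ssh"
)

// appendKnownHost persists a host key using the WriteKnownHost function shown
// above. The caller supplies the key it received during the SSH handshake.
func appendKnownHost(path, hostname string, remote net.Addr, key ssh.PublicKey) error {
    f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600)
    if err != nil {
        return err
    }
    defer f.Close()

    // Fails with a descriptive error if hostname contains spaces or tabs.
    return knownhosts.WriteKnownHost(f, hostname, remote, key)
}

func main() {
    fmt.Println("appendKnownHost wires os.OpenFile to knownhosts.WriteKnownHost;",
        "call it from a host-key callback with a real key.")
}
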

8
vendor/modules.txt vendored

@@ -102,9 +102,6 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc
 github.com/ProtonMail/go-crypto/openpgp/internal/encoding
 github.com/ProtonMail/go-crypto/openpgp/packet
 github.com/ProtonMail/go-crypto/openpgp/s2k
-# github.com/acomagu/bufpipe v1.0.4
-## explicit; go 1.12
-github.com/acomagu/bufpipe
 # github.com/agext/levenshtein v1.2.3
 ## explicit
 github.com/agext/levenshtein
@@ -471,7 +468,7 @@ github.com/go-git/go-billy/v5/helper/polyfill
 github.com/go-git/go-billy/v5/memfs
 github.com/go-git/go-billy/v5/osfs
 github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.10.0
+# github.com/go-git/go-git/v5 v5.10.1
 ## explicit; go 1.19
 github.com/go-git/go-git/v5
 github.com/go-git/go-git/v5/config
@@ -518,6 +515,7 @@ github.com/go-git/go-git/v5/utils/merkletrie/index
 github.com/go-git/go-git/v5/utils/merkletrie/internal/frame
 github.com/go-git/go-git/v5/utils/merkletrie/noder
 github.com/go-git/go-git/v5/utils/sync
+github.com/go-git/go-git/v5/utils/trace
 # github.com/godbus/dbus/v5 v5.1.0
 ## explicit; go 1.12
 github.com/godbus/dbus/v5
@@ -804,7 +802,7 @@ github.com/sergi/go-diff/diffmatchpatch
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
-# github.com/skeema/knownhosts v1.2.0
+# github.com/skeema/knownhosts v1.2.1
 ## explicit; go 1.17
 github.com/skeema/knownhosts
 # github.com/spf13/afero v1.11.0