chore(deps): bump github.com/go-git/go-git/v5 from 5.11.0 to 5.12.0 (#3095)
Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.11.0 to 5.12.0.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.11.0...v5.12.0)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
This commit is contained in:
parent c841c3c82d
commit b228f2f4b0
go.mod (6 changes)
@@ -14,7 +14,7 @@ require (
 github.com/containerd/cgroups v1.1.0 // indirect
 github.com/docker/docker v26.0.0+incompatible
 github.com/go-git/go-billy/v5 v5.5.0
-github.com/go-git/go-git/v5 v5.11.0
+github.com/go-git/go-git/v5 v5.12.0
 github.com/golang/mock v1.6.0
 github.com/google/go-cmp v0.6.0
 github.com/google/go-containerregistry v0.19.1
@@ -129,7 +129,7 @@ require (
 github.com/prometheus/client_model v0.5.0 // indirect
 github.com/prometheus/common v0.44.0 // indirect
 github.com/prometheus/procfs v0.12.0 // indirect
-github.com/sergi/go-diff v1.3.1 // indirect
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 github.com/vbatts/tar-split v0.11.5 // indirect
 github.com/xanzy/ssh-agent v0.3.3 // indirect
 go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
@@ -181,7 +181,7 @@ require (
 github.com/pjbgf/sha1cd v0.3.0 // indirect
 github.com/sagikazarmark/locafero v0.4.0 // indirect
 github.com/sagikazarmark/slog-shim v0.1.0 // indirect
-github.com/skeema/knownhosts v1.2.1 // indirect
+github.com/skeema/knownhosts v1.2.2 // indirect
 github.com/sourcegraph/conc v0.3.0 // indirect
 github.com/spf13/cast v1.6.0 // indirect
 github.com/spf13/viper v1.18.2 // indirect
go.sum (16 changes)
@@ -209,16 +209,16 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
-github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
+github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE=
+github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
 github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
 github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
-github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
+github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
+github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -435,14 +435,14 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
-github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
@@ -27,14 +27,14 @@ compatibility status with go-git.
 ## Branching and merging
 
 | Feature | Sub-feature | Status | Notes | Examples |
-| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
+| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- |
 | `branch` | | ✅ | | - [branch](_examples/branch/main.go) |
 | `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) |
-| `merge` | | ❌ | | |
+| `merge` | | ⚠️ (partial) | Fast-forward only | |
 | `mergetool` | | ❌ | | |
 | `stash` | | ❌ | | |
 | `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |
 
 ## Sharing and updating projects
 
@@ -28,6 +28,7 @@ build-git:
 test:
 	@echo "running against `git version`"; \
 	$(GOTEST) -race ./...
+	$(GOTEST) -v _examples/common_test.go _examples/common.go --examples
 
 TEMP_REPO := $(shell mktemp)
 test-sha256:
@@ -89,6 +89,25 @@ type CloneOptions struct {
 	Shared bool
 }
 
+// MergeOptions describes how a merge should be performed.
+type MergeOptions struct {
+	// Strategy defines the merge strategy to be used.
+	Strategy MergeStrategy
+}
+
+// MergeStrategy represents the different types of merge strategies.
+type MergeStrategy int8
+
+const (
+	// FastForwardMerge represents a Git merge strategy where the current
+	// branch can be simply updated to point to the HEAD of the branch being
+	// merged. This is only possible if the history of the branch being merged
+	// is a linear descendant of the current branch, with no conflicting commits.
+	//
+	// This is the default option.
+	FastForwardMerge MergeStrategy = iota
+)
+
 // Validate validates the fields and sets the default values.
 func (o *CloneOptions) Validate() error {
 	if o.URL == "" {
@@ -166,7 +185,7 @@ const (
 	// AllTags fetch all tags from the remote (i.e., fetch remote tags
 	// refs/tags/* into local tags with the same name)
 	AllTags
-	//NoTags fetch no tags from the remote at all
+	// NoTags fetch no tags from the remote at all
 	NoTags
 )
 
@@ -198,6 +217,9 @@ type FetchOptions struct {
 	CABundle []byte
 	// ProxyOptions provides info required for connecting to a proxy.
 	ProxyOptions transport.ProxyOptions
+	// Prune specify that local refs that match given RefSpecs and that do
+	// not exist remotely will be removed.
+	Prune bool
 }
 
 // Validate validates the fields and sets the default values.
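The new `Prune` option removes local remote-tracking references whose counterpart no longer exists on the remote. A minimal usage sketch against the v5.12.0 API; the repository path and remote name are assumptions:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Open an existing repository; "." is an assumed path.
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	// Fetch from "origin" and drop local refs that the remote deleted.
	err = repo.Fetch(&git.FetchOptions{
		RemoteName: "origin",
		Prune:      true,
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		panic(err)
	}
	fmt.Println("fetch with prune completed")
}
```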
@@ -324,9 +346,9 @@ var (
 
 // CheckoutOptions describes how a checkout operation should be performed.
 type CheckoutOptions struct {
-	// Hash is the hash of the commit to be checked out. If used, HEAD will be
-	// in detached mode. If Create is not used, Branch and Hash are mutually
-	// exclusive.
+	// Hash is the hash of a commit or tag to be checked out. If used, HEAD
+	// will be in detached mode. If Create is not used, Branch and Hash are
+	// mutually exclusive.
 	Hash plumbing.Hash
 	// Branch to be checked out, if Branch and Hash are empty is set to `master`.
 	Branch plumbing.ReferenceName
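Following the updated doc comment, `CheckoutOptions.Hash` may reference an annotated tag as well as a commit; the worktree resolves the tag to its target commit (see the `getCommitFromCheckoutOptions` hunk further below). A hedged sketch; the hash value is a placeholder:

```go
package main

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed repository path
	if err != nil {
		panic(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Placeholder hash; it may name either a commit or an annotated tag.
	err = wt.Checkout(&git.CheckoutOptions{
		Hash: plumbing.NewHash("0123456789abcdef0123456789abcdef01234567"),
	})
	if err != nil {
		panic(err)
	}
}
```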
@@ -405,6 +427,11 @@ func (o *ResetOptions) Validate(r *Repository) error {
 		}
 
 		o.Commit = ref.Hash()
+	} else {
+		_, err := r.CommitObject(o.Commit)
+		if err != nil {
+			return fmt.Errorf("invalid reset option: %w", err)
+		}
 	}
 
 	return nil
@@ -474,6 +501,11 @@ type AddOptions struct {
 	// Glob adds all paths, matching pattern, to the index. If pattern matches a
 	// directory path, all directory contents are added to the index recursively.
 	Glob string
+	// SkipStatus adds the path with no status check. This option is relevant only
+	// when the `Path` option is specified and does not apply when the `All` option is used.
+	// Notice that when passing an ignored path it will be added anyway.
+	// When true it can speed up adding files to the worktree in very large repositories.
+	SkipStatus bool
 }
 
 // Validate validates the fields and sets the default values.
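A sketch of staging a single path with the new `SkipStatus` flag, which avoids the full worktree status walk; the file path is hypothetical:

```go
package main

import (
	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed path
	if err != nil {
		panic(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Stage one file and skip the worktree status check, which can be slow
	// in very large repositories.
	if err := wt.AddWithOptions(&git.AddOptions{
		Path:       "docs/CHANGELOG.md", // hypothetical file
		SkipStatus: true,
	}); err != nil {
		panic(err)
	}
}
```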
@@ -507,6 +539,10 @@ type CommitOptions struct {
 	// commit will not be signed. The private key must be present and already
 	// decrypted.
 	SignKey *openpgp.Entity
+	// Signer denotes a cryptographic signer to sign the commit with.
+	// A nil value here means the commit will not be signed.
+	// Takes precedence over SignKey.
+	Signer Signer
 	// Amend will create a new commit object and replace the commit that HEAD currently
 	// points to. Cannot be used with All nor Parents.
 	Amend bool
@@ -116,7 +116,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
 	return
 }
 
-// LoadGlobalPatterns loads gitignore patterns from from the gitignore file
+// LoadGlobalPatterns loads gitignore patterns from the gitignore file
 // declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
 // exist the function will return nil. If the core.excludesfile property
 // is not declared, the function will return nil. If the file pointed to by
@@ -132,7 +132,7 @@ func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
 	return loadPatterns(fs, fs.Join(home, gitconfigFile))
 }
 
-// LoadSystemPatterns loads gitignore patterns from from the gitignore file
+// LoadSystemPatterns loads gitignore patterns from the gitignore file
 // declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
 // not exist the function will return nil. If the core.excludesfile property
 // is not declared, the function will return nil. If the file pointed to by
@@ -27,7 +27,7 @@ const (
 	// the commit with the "mergetag" header.
 	headermergetag string = "mergetag"
 
-	defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8"
+	defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8"
 )
 
 // Hash represents the hash of an object
@@ -189,7 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
 	}
 
 	c.Hash = o.Hash()
-	c.Encoding = defaultUtf8CommitMesageEncoding
+	c.Encoding = defaultUtf8CommitMessageEncoding
 
 	reader, err := o.Reader()
 	if err != nil {
@@ -335,7 +335,7 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
 		}
 	}
 
-	if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding {
+	if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding {
 		if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil {
 			return err
 		}
@@ -57,6 +57,8 @@ func (c *commitPathIter) Next() (*Commit, error) {
 }
 
 func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
+	var parentTree, currentTree *Tree
+
 	for {
 		// Parent-commit can be nil if the current-commit is the initial commit
 		parentCommit, parentCommitErr := c.sourceIter.Next()
@@ -68,13 +70,17 @@ func (c *commitPathIter) getNextFileCommit() (*Commit, error) {
 			parentCommit = nil
 		}
 
-		// Fetch the trees of the current and parent commits
-		currentTree, currTreeErr := c.currentCommit.Tree()
-		if currTreeErr != nil {
-			return nil, currTreeErr
-		}
-
-		var parentTree *Tree
+		if parentTree == nil {
+			var currTreeErr error
+			currentTree, currTreeErr = c.currentCommit.Tree()
+			if currTreeErr != nil {
+				return nil, currTreeErr
+			}
+		} else {
+			currentTree = parentTree
+			parentTree = nil
+		}
+
 		if parentCommit != nil {
 			var parentTreeErr error
 			parentTree, parentTreeErr = parentCommit.Tree()
@@ -115,7 +121,8 @@ func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool {
 
 		// filename matches, now check if source iterator contains all commits (from all refs)
 		if c.checkParent {
-			if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
+			// Check if parent is beyond the initial commit
+			if parent == nil || isParentHash(parent.Hash, c.currentCommit) {
 				return true
 			}
 			continue
@@ -6,7 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"math"
+	"strconv"
 	"strings"
 
 	"github.com/go-git/go-git/v5/plumbing"
@@ -234,69 +234,56 @@ func (fileStats FileStats) String() string {
 	return printStat(fileStats)
 }
 
+// printStat prints the stats of changes in content of files.
+// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615
+// Parts of the output:
+// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
+// example: " main.go | 10 +++++++--- "
 func printStat(fileStats []FileStat) string {
-	padLength := float64(len(" "))
-	newlineLength := float64(len("\n"))
-	separatorLength := float64(len("|"))
-	// Soft line length limit. The text length calculation below excludes
-	// length of the change number. Adding that would take it closer to 80,
-	// but probably not more than 80, until it's a huge number.
-	lineLength := 72.0
-
-	// Get the longest filename and longest total change.
-	var longestLength float64
-	var longestTotalChange float64
-	for _, fs := range fileStats {
-		if int(longestLength) < len(fs.Name) {
-			longestLength = float64(len(fs.Name))
-		}
-		totalChange := fs.Addition + fs.Deletion
-		if int(longestTotalChange) < totalChange {
-			longestTotalChange = float64(totalChange)
-		}
-	}
-
-	// Parts of the output:
-	// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
-	// example: " main.go | 10 +++++++--- "
-
-	// <pad><filename><pad>
-	leftTextLength := padLength + longestLength + padLength
-
-	// <pad><number><pad><+++++/-----><newline>
-	// Excluding number length here.
-	rightTextLength := padLength + padLength + newlineLength
-
-	totalTextArea := leftTextLength + separatorLength + rightTextLength
-	heightOfHistogram := lineLength - totalTextArea
-
-	// Scale the histogram.
-	var scaleFactor float64
-	if longestTotalChange > heightOfHistogram {
-		// Scale down to heightOfHistogram.
-		scaleFactor = longestTotalChange / heightOfHistogram
-	} else {
-		scaleFactor = 1.0
-	}
-
-	finalOutput := ""
-	for _, fs := range fileStats {
-		addn := float64(fs.Addition)
-		deln := float64(fs.Deletion)
-		addc := int(math.Floor(addn/scaleFactor))
-		delc := int(math.Floor(deln/scaleFactor))
-		if addc < 0 {
-			addc = 0
-		}
-		if delc < 0 {
-			delc = 0
-		}
-		adds := strings.Repeat("+", addc)
-		dels := strings.Repeat("-", delc)
-		finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
-	}
-
-	return finalOutput
+	maxGraphWidth := uint(53)
+	maxNameLen := 0
+	maxChangeLen := 0
+
+	scaleLinear := func(it, width, max uint) uint {
+		if it == 0 || max == 0 {
+			return 0
+		}
+
+		return 1 + (it * (width - 1) / max)
+	}
+
+	for _, fs := range fileStats {
+		if len(fs.Name) > maxNameLen {
+			maxNameLen = len(fs.Name)
+		}
+
+		changes := strconv.Itoa(fs.Addition + fs.Deletion)
+		if len(changes) > maxChangeLen {
+			maxChangeLen = len(changes)
+		}
+	}
+
+	result := ""
+	for _, fs := range fileStats {
+		add := uint(fs.Addition)
+		del := uint(fs.Deletion)
+		np := maxNameLen - len(fs.Name)
+		cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion))
+
+		total := add + del
+		if total > maxGraphWidth {
+			add = scaleLinear(add, maxGraphWidth, total)
+			del = scaleLinear(del, maxGraphWidth, total)
+		}
+
+		adds := strings.Repeat("+", int(add))
+		dels := strings.Repeat("-", int(del))
+		namePad := strings.Repeat(" ", np)
+		changePad := strings.Repeat(" ", cp)
+
+		result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels)
+	}
+	return result
 }
 
 func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
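For illustration, a small sketch of the reworked stat output using the exported `object.FileStats` type; the sample values are made up:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	// Hypothetical stats: 5 added and 2 deleted lines in main.go.
	stats := object.FileStats{
		{Name: "main.go", Addition: 5, Deletion: 2},
	}

	// With the new formatter this prints something like:
	//  main.go | 7 +++++--
	fmt.Print(stats.String())
}
```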
@@ -7,6 +7,7 @@ import (
 	"io"
 	"path"
 	"path/filepath"
+	"sort"
 	"strings"
 
 	"github.com/go-git/go-git/v5/plumbing"
@@ -27,6 +28,7 @@ var (
 	ErrFileNotFound = errors.New("file not found")
 	ErrDirectoryNotFound = errors.New("directory not found")
 	ErrEntryNotFound = errors.New("entry not found")
+	ErrEntriesNotSorted = errors.New("entries in tree are not sorted")
 )
 
 // Tree is basically like a directory - it references a bunch of other trees
@@ -270,6 +272,28 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
 	return nil
 }
 
+type TreeEntrySorter []TreeEntry
+
+func (s TreeEntrySorter) Len() int {
+	return len(s)
+}
+
+func (s TreeEntrySorter) Less(i, j int) bool {
+	name1 := s[i].Name
+	name2 := s[j].Name
+	if s[i].Mode == filemode.Dir {
+		name1 += "/"
+	}
+	if s[j].Mode == filemode.Dir {
+		name2 += "/"
+	}
+	return name1 < name2
+}
+
+func (s TreeEntrySorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
 // Encode transforms a Tree into a plumbing.EncodedObject.
 func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
 	o.SetType(plumbing.TreeObject)
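The new `TreeEntrySorter` orders entries the way `Tree.Encode` now requires, treating directory names as if they ended in "/". A hedged sketch with hypothetical entries:

```go
package main

import (
	"sort"

	"github.com/go-git/go-git/v5/plumbing/filemode"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	// Hypothetical entries; directories compare as if their name ended in "/",
	// matching Git's canonical tree ordering.
	entries := []object.TreeEntry{
		{Name: "src", Mode: filemode.Dir},
		{Name: "src.go", Mode: filemode.Regular},
		{Name: "README", Mode: filemode.Regular},
	}

	sort.Sort(object.TreeEntrySorter(entries))
	// Order is now: README, src.go, src (because "src.go" < "src/").
	_ = entries
}
```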
@@ -279,7 +303,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
 	}
 
 	defer ioutil.CheckClose(w, &err)
 
+	if !sort.IsSorted(TreeEntrySorter(t.Entries)) {
+		return ErrEntriesNotSorted
+	}
+
 	for _, entry := range t.Entries {
+		if strings.IndexByte(entry.Name, 0) != -1 {
+			return fmt.Errorf("malformed filename %q", entry.Name)
+		}
 		if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
 			return err
 		}
@@ -88,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) {
 		}
 	}
 
-	return transformChildren(parent)
+	var err error
+	t.children, err = transformChildren(parent)
+	return t.children, err
 }
 
 // Returns the children of a tree as treenoders.
@@ -91,9 +91,9 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) (
 }
 
 type client struct {
-	c *http.Client
+	client *http.Client
 	transports *lru.Cache
-	m sync.RWMutex
+	mutex sync.RWMutex
 }
 
 // ClientOptions holds user configurable options for the client.
@@ -147,7 +147,7 @@ func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transpo
 		}
 	}
 	cl := &client{
-		c: c,
+		client: c,
 	}
 
 	if opts != nil {
@@ -234,10 +234,10 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*
 	// if the client wasn't configured to have a cache for transports then just configure
 	// the transport and use it directly, otherwise try to use the cache.
 	if c.transports == nil {
-		tr, ok := c.c.Transport.(*http.Transport)
+		tr, ok := c.client.Transport.(*http.Transport)
 		if !ok {
 			return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s",
-				reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport))
+				reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport))
 		}
 
 		transport = tr.Clone()
@@ -258,7 +258,7 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*
 		transport, found = c.fetchTransport(transportOpts)
 
 		if !found {
-			transport = c.c.Transport.(*http.Transport).Clone()
+			transport = c.client.Transport.(*http.Transport).Clone()
 			configureTransport(transport, ep)
 			c.addTransport(transportOpts, transport)
 		}
@@ -266,12 +266,12 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (*
 
 		httpClient = &http.Client{
 			Transport: transport,
-			CheckRedirect: c.c.CheckRedirect,
-			Jar: c.c.Jar,
-			Timeout: c.c.Timeout,
+			CheckRedirect: c.client.CheckRedirect,
+			Jar: c.client.Jar,
+			Timeout: c.client.Timeout,
 		}
 	} else {
-		httpClient = c.c
+		httpClient = c.client
 	}
 
 	s := &session{
@@ -14,21 +14,21 @@ type transportOptions struct {
 }
 
 func (c *client) addTransport(opts transportOptions, transport *http.Transport) {
-	c.m.Lock()
+	c.mutex.Lock()
 	c.transports.Add(opts, transport)
-	c.m.Unlock()
+	c.mutex.Unlock()
 }
 
 func (c *client) removeTransport(opts transportOptions) {
-	c.m.Lock()
+	c.mutex.Lock()
 	c.transports.Remove(opts)
-	c.m.Unlock()
+	c.mutex.Unlock()
 }
 
 func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) {
-	c.m.RLock()
+	c.mutex.RLock()
 	t, ok := c.transports.Get(opts)
-	c.m.RUnlock()
+	c.mutex.RUnlock()
 	if !ok {
 		return nil, false
 	}
@@ -49,7 +49,9 @@ type runner struct {
 func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
 	c := &command{command: cmd, endpoint: ep, config: r.config}
 	if auth != nil {
-		c.setAuth(auth)
+		if err := c.setAuth(auth); err != nil {
+			return nil, err
+		}
 	}
 
 	if err := c.connect(); err != nil {
@@ -470,6 +470,14 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
 		}
 	}
 
+	var updatedPrune bool
+	if o.Prune {
+		updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force)
 	if err != nil {
 		return nil, err
@@ -482,7 +490,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
 		}
 	}
 
-	if !updated {
+	if !updated && !updatedPrune {
 		return remoteRefs, NoErrAlreadyUpToDate
 	}
 
@@ -574,6 +582,27 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl
 	return err
 }
 
+func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) {
+	var updatedPrune bool
+	for _, spec := range specs {
+		rev := spec.Reverse()
+		for _, ref := range localRefs {
+			if !rev.Match(ref.Name()) {
+				continue
+			}
+			_, err := remoteRefs.Reference(rev.Dst(ref.Name()))
+			if errors.Is(err, plumbing.ErrReferenceNotFound) {
+				updatedPrune = true
+				err := r.s.RemoveReference(ref.Name())
+				if err != nil {
+					return false, err
+				}
+			}
+		}
+	}
+	return updatedPrune, nil
+}
+
 func (r *Remote) addReferencesToUpdate(
 	refspecs []config.RefSpec,
 	localRefs []*plumbing.Reference,
@@ -1099,7 +1128,7 @@ func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earlies
 	}
 
 	found := false
-	// stop iterating at the earlist shallow commit, ignoring its parents
+	// stop iterating at the earliest shallow commit, ignoring its parents
 	// note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents.
 	// as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no
 	// real way of telling whether it will be a fast-forward merge.
@@ -51,19 +51,21 @@ var (
 	// ErrFetching is returned when the packfile could not be downloaded
 	ErrFetching = errors.New("unable to fetch packfile")
 
 	ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch")
 	ErrRepositoryNotExists = errors.New("repository does not exist")
 	ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist")
 	ErrRepositoryAlreadyExists = errors.New("repository already exists")
 	ErrRemoteNotFound = errors.New("remote not found")
 	ErrRemoteExists = errors.New("remote already exists")
 	ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'")
 	ErrWorktreeNotProvided = errors.New("worktree should be provided")
 	ErrIsBareRepository = errors.New("worktree not available in a bare repository")
 	ErrUnableToResolveCommit = errors.New("unable to resolve commit")
 	ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
 	ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support")
 	ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme")
+	ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy")
+	ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes")
 )
 
 // Repository represents a git repository
@@ -1769,8 +1771,43 @@ func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) {
 	return nil
 }
 
+// Merge merges the reference branch into the current branch.
+//
+// If the merge is not possible (or supported) returns an error without changing
+// the HEAD for the current branch. Possible errors include:
+//   - The merge strategy is not supported.
+//   - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible).
+func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error {
+	if opts.Strategy != FastForwardMerge {
+		return ErrUnsupportedMergeStrategy
+	}
+
+	// Ignore error as not having a shallow list is optional here.
+	shallowList, _ := r.Storer.Shallow()
+	var earliestShallow *plumbing.Hash
+	if len(shallowList) > 0 {
+		earliestShallow = &shallowList[0]
+	}
+
+	head, err := r.Head()
+	if err != nil {
+		return err
+	}
+
+	ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow)
+	if err != nil {
+		return err
+	}
+
+	if !ff {
+		return ErrFastForwardMergeNotPossible
+	}
+
+	return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash()))
+}
+
 // createNewObjectPack is a helper for RepackObjects taking care
-// of creating a new pack. It is used so the the PackfileWriter
+// of creating a new pack. It is used so the PackfileWriter
 // deferred close has the right scope.
 func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) {
 	ow := newObjectWalker(r.Storer)
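A minimal sketch of the new fast-forward merge API; the repository path, remote, and branch names are assumptions:

```go
package main

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed repository path
	if err != nil {
		panic(err)
	}

	// Resolve the branch to merge from; "origin/main" is assumed.
	ref, err := repo.Reference(plumbing.NewRemoteReferenceName("origin", "main"), true)
	if err != nil {
		panic(err)
	}

	// Fast-forward HEAD onto the resolved reference; returns
	// ErrFastForwardMergeNotPossible when the histories have diverged.
	if err := repo.Merge(*ref, git.MergeOptions{Strategy: git.FastForwardMerge}); err != nil {
		panic(err)
	}
}
```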
@@ -0,0 +1,33 @@
+package git
+
+import (
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+// signableObject is an object which can be signed.
+type signableObject interface {
+	EncodeWithoutSignature(o plumbing.EncodedObject) error
+}
+
+// Signer is an interface for signing git objects.
+// message is a reader containing the encoded object to be signed.
+// Implementors should return the encoded signature and an error if any.
+// See https://git-scm.com/docs/gitformat-signature for more information.
+type Signer interface {
+	Sign(message io.Reader) ([]byte, error)
+}
+
+func signObject(signer Signer, obj signableObject) ([]byte, error) {
+	encoded := &plumbing.MemoryObject{}
+	if err := obj.EncodeWithoutSignature(encoded); err != nil {
+		return nil, err
+	}
+	r, err := encoded.Reader()
+	if err != nil {
+		return nil, err
+	}
+
+	return signer.Sign(r)
+}
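A sketch of wiring a custom `Signer` into `CommitOptions`; `fakeSigner` and its output are stand-ins, not a real signature scheme:

```go
package main

import (
	"fmt"
	"io"
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// fakeSigner is a hypothetical Signer; a real one would produce a proper
// detached signature (SSH, X.509, hardware token, ...).
type fakeSigner struct{}

func (fakeSigner) Sign(message io.Reader) ([]byte, error) {
	if _, err := io.ReadAll(message); err != nil { // message is the encoded commit
		return nil, err
	}
	return []byte("-----BEGIN FAKE SIGNATURE-----\n"), nil
}

func main() {
	repo, err := git.PlainOpen(".") // assumed path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Signer takes precedence over SignKey when both are set.
	hash, err := wt.Commit("signed via custom Signer", &git.CommitOptions{
		Author: &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
		Signer: fakeSigner{},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created commit", hash)
}
```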
@@ -29,6 +29,8 @@ type node struct {
 	hash []byte
 	children []noder.Noder
 	isDir bool
+	mode os.FileMode
+	size int64
 }
 
 // NewRootNode returns the root node based on a given billy.Filesystem.
@@ -48,8 +50,15 @@ func NewRootNode(
 // difftree algorithm will detect changes in the contents of files and also in
 // their mode.
 //
+// Please note that the hash is calculated on first invocation of Hash(),
+// meaning that it will not update when the underlying file changes
+// between invocations.
+//
 // The hash of a directory is always a 24-bytes slice of zero values
 func (n *node) Hash() []byte {
+	if n.hash == nil {
+		n.calculateHash()
+	}
 	return n.hash
 }
@@ -121,81 +130,74 @@ func (n *node) calculateChildren() error {
 func (n *node) newChildNode(file os.FileInfo) (*node, error) {
 	path := path.Join(n.path, file.Name())
 
-	hash, err := n.calculateHash(path, file)
-	if err != nil {
-		return nil, err
-	}
-
 	node := &node{
 		fs: n.fs,
 		submodules: n.submodules,
 
 		path: path,
-		hash: hash,
 		isDir: file.IsDir(),
+		size: file.Size(),
+		mode: file.Mode(),
 	}
 
-	if hash, isSubmodule := n.submodules[path]; isSubmodule {
-		node.hash = append(hash[:], filemode.Submodule.Bytes()...)
+	if _, isSubmodule := n.submodules[path]; isSubmodule {
 		node.isDir = false
 	}
 
 	return node, nil
 }
 
-func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) {
-	if file.IsDir() {
-		return make([]byte, 24), nil
+func (n *node) calculateHash() {
+	if n.isDir {
+		n.hash = make([]byte, 24)
+		return
+	}
+	mode, err := filemode.NewFromOSFileMode(n.mode)
+	if err != nil {
+		n.hash = plumbing.ZeroHash[:]
+		return
+	}
+	if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule {
+		n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...)
+		return
 	}
 
 	var hash plumbing.Hash
-	var err error
-	if file.Mode()&os.ModeSymlink != 0 {
-		hash, err = n.doCalculateHashForSymlink(path, file)
+	if n.mode&os.ModeSymlink != 0 {
+		hash = n.doCalculateHashForSymlink()
 	} else {
-		hash, err = n.doCalculateHashForRegular(path, file)
+		hash = n.doCalculateHashForRegular()
 	}
-	if err != nil {
-		return nil, err
-	}
-
-	mode, err := filemode.NewFromOSFileMode(file.Mode())
-	if err != nil {
-		return nil, err
-	}
-
-	return append(hash[:], mode.Bytes()...), nil
+	n.hash = append(hash[:], mode.Bytes()...)
 }
 
-func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) {
-	f, err := n.fs.Open(path)
+func (n *node) doCalculateHashForRegular() plumbing.Hash {
+	f, err := n.fs.Open(n.path)
 	if err != nil {
-		return plumbing.ZeroHash, err
+		return plumbing.ZeroHash
 	}
 
 	defer f.Close()
 
-	h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
+	h := plumbing.NewHasher(plumbing.BlobObject, n.size)
 	if _, err := io.Copy(h, f); err != nil {
-		return plumbing.ZeroHash, err
+		return plumbing.ZeroHash
 	}
 
-	return h.Sum(), nil
+	return h.Sum()
 }
 
-func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) {
-	target, err := n.fs.Readlink(path)
+func (n *node) doCalculateHashForSymlink() plumbing.Hash {
+	target, err := n.fs.Readlink(n.path)
 	if err != nil {
-		return plumbing.ZeroHash, err
+		return plumbing.ZeroHash
 	}
 
-	h := plumbing.NewHasher(plumbing.BlobObject, file.Size())
+	h := plumbing.NewHasher(plumbing.BlobObject, n.size)
 	if _, err := h.Write([]byte(target)); err != nil {
-		return plumbing.ZeroHash, err
+		return plumbing.ZeroHash
 	}
 
-	return h.Sum(), nil
+	return h.Sum()
 }
 
 func (n *node) String() string {
@@ -227,20 +227,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error {
 }
 
 func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) {
-	if !opts.Hash.IsZero() {
-		return opts.Hash, nil
+	hash := opts.Hash
+	if hash.IsZero() {
+		b, err := w.r.Reference(opts.Branch, true)
+		if err != nil {
+			return plumbing.ZeroHash, err
+		}
+
+		hash = b.Hash()
 	}
 
-	b, err := w.r.Reference(opts.Branch, true)
-	if err != nil {
-		return plumbing.ZeroHash, err
-	}
-
-	if !b.Name().IsTag() {
-		return b.Hash(), nil
-	}
-
-	o, err := w.r.Object(plumbing.AnyObject, b.Hash())
+	o, err := w.r.Object(plumbing.AnyObject, hash)
 	if err != nil {
 		return plumbing.ZeroHash, err
 	}
@@ -248,7 +245,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing
 	switch o := o.(type) {
 	case *object.Tag:
 		if o.TargetType != plumbing.CommitObject {
-			return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType)
+			return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType)
 		}
 
 		return o.Target, nil
@@ -256,7 +253,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing
 		return o.Hash, nil
 	}
 
-	return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type())
+	return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type())
 }
 
 func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error {
@@ -431,6 +428,10 @@ var worktreeDeny = map[string]struct{}{
 func validPath(paths ...string) error {
 	for _, p := range paths {
 		parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') })
+		if len(parts) == 0 {
+			return fmt.Errorf("invalid path: %q", p)
+		}
+
 		if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied {
 			return fmt.Errorf("invalid path prefix: %q", p)
 		}
@@ -3,6 +3,7 @@ package git
 import (
 	"bytes"
 	"errors"
+	"io"
 	"path"
 	"sort"
 	"strings"
@@ -14,6 +15,7 @@ import (
 	"github.com/go-git/go-git/v5/storage"
 
 	"github.com/ProtonMail/go-crypto/openpgp"
+	"github.com/ProtonMail/go-crypto/openpgp/packet"
 	"github.com/go-git/go-billy/v5"
 )
 
@@ -43,29 +45,30 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error
 		if err != nil {
 			return plumbing.ZeroHash, err
 		}
-
-		t, err := w.r.getTreeFromCommitHash(head.Hash())
+		headCommit, err := w.r.CommitObject(head.Hash())
 		if err != nil {
 			return plumbing.ZeroHash, err
 		}
 
-		treeHash = t.Hash
-		opts.Parents = []plumbing.Hash{head.Hash()}
-	} else {
-		idx, err := w.r.Storer.Index()
-		if err != nil {
-			return plumbing.ZeroHash, err
+		opts.Parents = nil
+		if len(headCommit.ParentHashes) != 0 {
+			opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]}
 		}
+	}
 
-		h := &buildTreeHelper{
-			fs: w.Filesystem,
-			s: w.r.Storer,
-		}
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
 
-		treeHash, err = h.BuildTree(idx, opts)
-		if err != nil {
-			return plumbing.ZeroHash, err
-		}
+	h := &buildTreeHelper{
+		fs: w.Filesystem,
+		s: w.r.Storer,
+	}
+
+	treeHash, err = h.BuildTree(idx, opts)
+	if err != nil {
+		return plumbing.ZeroHash, err
 	}
 
 	commit, err := w.buildCommitObject(msg, opts, treeHash)
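For context, a sketch of amending HEAD with `CommitOptions.Amend`; with this change the amended commit keeps the original commit's parent rather than re-parenting onto HEAD. The message is made up:

```go
package main

import (
	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Replace the commit HEAD points to, reusing the original commit's parent.
	if _, err := wt.Commit("reworded message", &git.CommitOptions{Amend: true}); err != nil {
		panic(err)
	}
}
```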
@@ -125,12 +128,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb
 		ParentHashes: opts.Parents,
 	}
 
-	if opts.SignKey != nil {
-		sig, err := w.buildCommitSignature(commit, opts.SignKey)
+	// Convert SignKey into a Signer if set. Existing Signer should take priority.
+	signer := opts.Signer
+	if signer == nil && opts.SignKey != nil {
+		signer = &gpgSigner{key: opts.SignKey}
+	}
+	if signer != nil {
+		sig, err := signObject(signer, commit)
 		if err != nil {
 			return plumbing.ZeroHash, err
 		}
-		commit.PGPSignature = sig
+		commit.PGPSignature = string(sig)
 	}
 
 	obj := w.r.Storer.NewEncodedObject()
@@ -140,20 +148,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb
 	return w.r.Storer.SetEncodedObject(obj)
 }
 
-func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) {
-	encoded := &plumbing.MemoryObject{}
-	if err := commit.Encode(encoded); err != nil {
-		return "", err
-	}
-	r, err := encoded.Reader()
-	if err != nil {
-		return "", err
-	}
+type gpgSigner struct {
+	key *openpgp.Entity
+	cfg *packet.Config
+}
+
+func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) {
 	var b bytes.Buffer
-	if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil {
-		return "", err
+	if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil {
+		return nil, err
 	}
-	return b.String(), nil
+	return b.Bytes(), nil
 }
 
 // buildTreeHelper converts a given index.Index file into multiple git objects
@@ -263,4 +268,4 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr
 		return hash, nil
 	}
 	return h.s.SetEncodedObject(o)
 }
@@ -271,7 +271,7 @@ func diffTreeIsEquals(a, b noder.Hasher) bool {
 // no error is returned. When path is a file, the blob.Hash is returned.
 func (w *Worktree) Add(path string) (plumbing.Hash, error) {
     // TODO(mcuadros): deprecate in favor of AddWithOption in v6.
-    return w.doAdd(path, make([]gitignore.Pattern, 0))
+    return w.doAdd(path, make([]gitignore.Pattern, 0), false)
 }
 
 func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
@@ -321,7 +321,7 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error {
     }
 
     if opts.All {
-        _, err := w.doAdd(".", w.Excludes)
+        _, err := w.doAdd(".", w.Excludes, false)
         return err
     }
 
@@ -329,16 +329,11 @@
         return w.AddGlob(opts.Glob)
     }
 
-    _, err := w.Add(opts.Path)
+    _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus)
     return err
 }
 
-func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) {
-    s, err := w.Status()
-    if err != nil {
-        return plumbing.ZeroHash, err
-    }
-
+func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) {
     idx, err := w.r.Storer.Index()
     if err != nil {
         return plumbing.ZeroHash, err
@@ -348,6 +343,17 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbi
     var added bool
 
     fi, err := w.Filesystem.Lstat(path)
+
+    // status is required for doAddDirectory
+    var s Status
+    var err2 error
+    if !skipStatus || fi == nil || fi.IsDir() {
+        s, err2 = w.Status()
+        if err2 != nil {
+            return plumbing.ZeroHash, err2
+        }
+    }
+
     if err != nil || !fi.IsDir() {
         added, h, err = w.doAddFile(idx, s, path, ignorePattern)
     } else {
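
The new skipStatus path avoids the full worktree status walk when adding a single known path; directory adds still compute status because doAddDirectory needs it. A small sketch, assuming the option surfaces as `AddOptions.SkipStatus` in the public API:

```go
package addfast

import (
	git "github.com/go-git/go-git/v5"
)

// addWithoutStatus stages one file while skipping the Status() walk that the
// hunks above make optional for plain-path adds. On large worktrees this is
// the expensive part of Add; the trade-off is that an unmodified file is
// re-hashed and written to the index anyway.
func addWithoutStatus(w *git.Worktree, path string) error {
	return w.AddWithOptions(&git.AddOptions{
		Path:       path,
		SkipStatus: true, // assumed v5.12.0 field
	})
}
```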
@@ -421,8 +427,9 @@ func (w *Worktree) AddGlob(pattern string) error {
 
 // doAddFile create a new blob from path and update the index, added is true if
 // the file added is different from the index.
+// if s status is nil will skip the status check and update the index anyway
 func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) {
-    if s.File(path).Worktree == Unmodified {
+    if s != nil && s.File(path).Worktree == Unmodified {
         return false, h, nil
     }
     if len(ignorePattern) > 0 {
@@ -34,8 +34,6 @@ const (
     DiffInsert Operation = 1
     // DiffEqual item represents an equal diff.
     DiffEqual Operation = 0
-    //IndexSeparator is used to seperate the array indexes in an index string
-    IndexSeparator = ","
 )
 
 // Diff represents one diff operation
@@ -406,14 +404,11 @@ func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune
 func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff {
     hydrated := make([]Diff, 0, len(diffs))
     for _, aDiff := range diffs {
-        chars := strings.Split(aDiff.Text, IndexSeparator)
-        text := make([]string, len(chars))
+        runes := []rune(aDiff.Text)
+        text := make([]string, len(runes))
 
-        for i, r := range chars {
-            i1, err := strconv.Atoi(r)
-            if err == nil {
-                text[i] = lineArray[i1]
-            }
+        for i, r := range runes {
+            text[i] = lineArray[runeToInt(r)]
         }
 
         aDiff.Text = strings.Join(text, "")
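
Callers of the vendored go-diff package only see this through the usual line-mode flow: each distinct line is mapped to a placeholder character (now a rune index rather than a comma-separated decimal index), the placeholders are diffed, and DiffCharsToLines re-hydrates the text. A sketch using the public API:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	a := "line one\nline two\nline three\n"
	b := "line one\nline 2\nline three\n"

	dmp := diffmatchpatch.New()

	// Line mode: map lines to placeholder characters, diff the short
	// strings, then expand the placeholders back into full lines.
	c1, c2, lineArray := dmp.DiffLinesToChars(a, b)
	diffs := dmp.DiffMain(c1, c2, false)
	diffs = dmp.DiffCharsToLines(diffs, lineArray)

	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}
}
```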
@@ -9,11 +9,16 @@
 package diffmatchpatch
 
 import (
-    "strconv"
+    "fmt"
     "strings"
     "unicode/utf8"
 )
 
+const UNICODE_INVALID_RANGE_START = 0xD800
+const UNICODE_INVALID_RANGE_END = 0xDFFF
+const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1
+const UNICODE_RANGE_MAX = 0x10FFFF
+
 // unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
 // In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
 var unescaper = strings.NewReplacer(
@@ -93,14 +98,93 @@ func intArrayToString(ns []uint32) string {
         return ""
     }
 
-    indexSeparator := IndexSeparator[0]
-
-    // Appr. 3 chars per num plus the comma.
-    b := []byte{}
+    b := []rune{}
     for _, n := range ns {
-        b = strconv.AppendInt(b, int64(n), 10)
-        b = append(b, indexSeparator)
+        b = append(b, intToRune(n))
     }
-    b = b[:len(b)-1]
     return string(b)
 }
+
+// These constants define the number of bits representable
+// in 1,2,3,4 byte utf8 sequences, respectively.
+const ONE_BYTE_BITS = 7
+const TWO_BYTE_BITS = 11
+const THREE_BYTE_BITS = 16
+const FOUR_BYTE_BITS = 21
+
+// Helper for getting a sequence of bits from an integer.
+func getBits(i uint32, cnt byte, from byte) byte {
+    return byte((i >> from) & ((1 << cnt) - 1))
+}
+
+// Converts an integer in the range 0~1112060 into a rune.
+// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8
+func intToRune(i uint32) rune {
+    if i < (1 << ONE_BYTE_BITS) {
+        return rune(i)
+    }
+
+    if i < (1 << TWO_BYTE_BITS) {
+        r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)})
+        if size != 2 || r == utf8.RuneError {
+            panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", size, r, i))
+        }
+        return r
+    }
+
+    // Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range
+    // was returning utf8.RuneError during encoding.
+    if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) {
+        if i >= UNICODE_INVALID_RANGE_START {
+            i += UNICODE_INVALID_RANGE_DELTA
+        }
+
+        r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+        if size != 3 || r == utf8.RuneError {
+            panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", size, r, i))
+        }
+        return r
+    }
+
+    if i < (1<<FOUR_BYTE_BITS - UNICODE_INVALID_RANGE_DELTA - 3) {
+        i += UNICODE_INVALID_RANGE_DELTA + 3
+        r, size := utf8.DecodeRune([]byte{0b11110000 | getBits(i, 3, 18), 0b10000000 | getBits(i, 6, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+        if size != 4 || r == utf8.RuneError {
+            panic(fmt.Sprintf("Error encoding an int %d with size 4, got rune %v and size %d", size, r, i))
+        }
+        return r
+    }
+    panic(fmt.Sprintf("The integer %d is too large for runeToInt()", i))
+}
+
+// Converts a rune generated by intToRune back to an integer
+func runeToInt(r rune) uint32 {
+    i := uint32(r)
+    if i < (1 << ONE_BYTE_BITS) {
+        return i
+    }
+
+    bytes := []byte{0, 0, 0, 0}
+
+    size := utf8.EncodeRune(bytes, r)
+
+    if size == 2 {
+        return uint32(bytes[0]&0b11111)<<6 | uint32(bytes[1]&0b111111)
+    }
+
+    if size == 3 {
+        result := uint32(bytes[0]&0b1111)<<12 | uint32(bytes[1]&0b111111)<<6 | uint32(bytes[2]&0b111111)
+        if result >= UNICODE_INVALID_RANGE_END {
+            return result - UNICODE_INVALID_RANGE_DELTA
+        }
+
+        return result
+    }
+
+    if size == 4 {
+        result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111)
+        return result - UNICODE_INVALID_RANGE_DELTA - 3
+    }
+
+    panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size))
+}
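
The idea behind intToRune/runeToInt is to pack a line index into a single valid code point, skipping the UTF-16 surrogate block so every index survives a round trip through a Go string. A simplified standalone illustration of that mapping (it deliberately ignores the extra -3 adjustment the vendored code applies near utf8.RuneError):

```go
package main

import "fmt"

const (
	surrogateStart = 0xD800
	surrogateEnd   = 0xDFFF
	surrogateDelta = surrogateEnd - surrogateStart + 1
)

// indexToRune maps a line index to a code point, shifting past the
// surrogate block so the result is always a valid, distinct rune.
func indexToRune(i uint32) rune {
	if i >= surrogateStart {
		i += surrogateDelta
	}
	return rune(i)
}

// runeToIndex undoes indexToRune.
func runeToIndex(r rune) uint32 {
	i := uint32(r)
	if i > surrogateEnd {
		i -= surrogateDelta
	}
	return i
}

func main() {
	for _, i := range []uint32{0, 127, 0xD7FF, 0xD800, 100000} {
		r := indexToRune(i)
		fmt.Printf("index %d -> rune U+%04X -> index %d\n", i, r, runeToIndex(r))
	}
}
```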
@@ -1,4 +1,4 @@
-Copyright 2023 Skeema LLC and the Skeema Knownhosts authors
+Copyright 2024 Skeema LLC and the Skeema Knownhosts authors
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -100,7 +100,7 @@ config := &ssh.ClientConfig{
 
 ## License
 
-**Source code copyright 2023 Skeema LLC and the Skeema Knownhosts authors**
+**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors**
 
 ```text
 Licensed under the Apache License, Version 2.0 (the "License");
@@ -76,13 +76,23 @@ func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []stri
     // example by https://github.com/golang/crypto/pull/254.
     hostKeys := hkcb.HostKeys(hostWithPort)
     seen := make(map[string]struct{}, len(hostKeys))
-    for _, key := range hostKeys {
-        typ := key.Type()
+    addAlgo := func(typ string) {
         if _, already := seen[typ]; !already {
             algos = append(algos, typ)
             seen[typ] = struct{}{}
         }
     }
+    for _, key := range hostKeys {
+        typ := key.Type()
+        if typ == ssh.KeyAlgoRSA {
+            // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms,
+            // not public key formats, so they can't appear as a PublicKey.Type.
+            // The corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2.
+            addAlgo(ssh.KeyAlgoRSASHA512)
+            addAlgo(ssh.KeyAlgoRSASHA256)
+        }
+        addAlgo(typ)
+    }
     return algos
 }
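
With this change, a known_hosts entry whose key type is ssh-rsa also advertises rsa-sha2-512 and rsa-sha2-256, so servers that have disabled SHA-1 signatures keep working. Typical wiring, roughly following the library's README (host, user, and auth values below are placeholders):

```go
package main

import (
	"log"
	"net"
	"os"
	"path/filepath"

	"github.com/skeema/knownhosts"
	"golang.org/x/crypto/ssh"
)

func main() {
	home, _ := os.UserHomeDir()
	kh, err := knownhosts.New(filepath.Join(home, ".ssh", "known_hosts"))
	if err != nil {
		log.Fatal(err)
	}

	addr := net.JoinHostPort("example.com", "22")
	config := &ssh.ClientConfig{
		User:            "git",
		Auth:            []ssh.AuthMethod{ /* e.g. ssh.PublicKeys(...) */ },
		HostKeyCallback: kh.HostKeyCallback(),
		// With v1.2.2, an ssh-rsa entry also yields rsa-sha2-512/256 here.
		HostKeyAlgorithms: kh.HostKeyAlgorithms(addr),
	}
	_ = config // pass to ssh.Dial("tcp", addr, config)
}
```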
@@ -636,7 +636,7 @@ github.com/go-git/go-billy/v5/helper/polyfill
 github.com/go-git/go-billy/v5/memfs
 github.com/go-git/go-billy/v5/osfs
 github.com/go-git/go-billy/v5/util
-# github.com/go-git/go-git/v5 v5.11.0
+# github.com/go-git/go-git/v5 v5.12.0
 ## explicit; go 1.19
 github.com/go-git/go-git/v5
 github.com/go-git/go-git/v5/config
@@ -972,13 +972,13 @@ github.com/sagikazarmark/locafero
 # github.com/sagikazarmark/slog-shim v0.1.0
 ## explicit; go 1.20
 github.com/sagikazarmark/slog-shim
-# github.com/sergi/go-diff v1.3.1
-## explicit; go 1.12
+# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3
+## explicit; go 1.13
 github.com/sergi/go-diff/diffmatchpatch
 # github.com/sirupsen/logrus v1.9.3
 ## explicit; go 1.13
 github.com/sirupsen/logrus
-# github.com/skeema/knownhosts v1.2.1
+# github.com/skeema/knownhosts v1.2.2
 ## explicit; go 1.17
 github.com/skeema/knownhosts
 # github.com/sourcegraph/conc v0.3.0