feat(dir2config): defaults

Mikaël Cluseau
2019-02-28 19:27:09 +11:00
parent d2b212ae6b
commit ea6fce68e1
383 changed files with 74236 additions and 41 deletions

vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go generated vendored Normal file

@@ -0,0 +1,144 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// Blob is used to store arbitrary data - it is generally a file.
type Blob struct {
// Hash of the blob.
Hash plumbing.Hash
// Size of the (uncompressed) blob.
Size int64
obj plumbing.EncodedObject
}
// GetBlob gets a blob from an object storer and decodes it.
func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
o, err := s.EncodedObject(plumbing.BlobObject, h)
if err != nil {
return nil, err
}
return DecodeBlob(o)
}
// DecodeBlob decodes an encoded object into a *Blob.
func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) {
b := &Blob{}
if err := b.Decode(o); err != nil {
return nil, err
}
return b, nil
}
// ID returns the object ID of the blob. The returned value will always match
// the current value of Blob.Hash.
//
// ID is present to fulfill the Object interface.
func (b *Blob) ID() plumbing.Hash {
return b.Hash
}
// Type returns the type of object. It always returns plumbing.BlobObject.
//
// Type is present to fulfill the Object interface.
func (b *Blob) Type() plumbing.ObjectType {
return plumbing.BlobObject
}
// Decode transforms a plumbing.EncodedObject into a Blob struct.
func (b *Blob) Decode(o plumbing.EncodedObject) error {
if o.Type() != plumbing.BlobObject {
return ErrUnsupportedObject
}
b.Hash = o.Hash()
b.Size = o.Size()
b.obj = o
return nil
}
// Encode transforms a Blob into a plumbing.EncodedObject.
func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.BlobObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
r, err := b.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(r, &err)
_, err = io.Copy(w, r)
return err
}
// Reader returns a reader that allows access to the contents of the blob.
func (b *Blob) Reader() (io.ReadCloser, error) {
return b.obj.Reader()
}
// BlobIter provides an iterator for a set of blobs.
type BlobIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewBlobIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *BlobIter that iterates over all
// blobs contained in the storer.EncodedObjectIter.
//
// Any non-blob object returned by the storer.EncodedObjectIter is skipped.
func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter {
return &BlobIter{iter, s}
}
// Next moves the iterator to the next blob and returns a pointer to it. If
// there are no more blobs, it returns io.EOF.
func (iter *BlobIter) Next() (*Blob, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
if obj.Type() != plumbing.BlobObject {
continue
}
return DecodeBlob(obj)
}
}
// ForEach calls the cb function for each blob contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *BlobIter) ForEach(cb func(*Blob) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
if obj.Type() != plumbing.BlobObject {
return nil
}
b, err := DecodeBlob(obj)
if err != nil {
return err
}
return cb(b)
})
}
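
For illustration (not part of the vendored file), a minimal sketch of reading a blob through this API. It assumes a storer.EncodedObjectStorer and a blob hash obtained elsewhere, e.g. from an opened repository; both parameters are placeholders.

package example

import (
    "bytes"
    "io"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// readBlob decodes the blob at h and returns its full contents as a string.
// s and h are assumed inputs, not defined in the vendored file above.
func readBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (string, error) {
    b, err := object.GetBlob(s, h) // look up and decode the blob object
    if err != nil {
        return "", err
    }
    r, err := b.Reader() // stream the (uncompressed) contents
    if err != nil {
        return "", err
    }
    defer r.Close()
    var buf bytes.Buffer
    if _, err := io.Copy(&buf, r); err != nil {
        return "", err
    }
    return buf.String(), nil
}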


@@ -0,0 +1,157 @@
package object
import (
"bytes"
"context"
"fmt"
"strings"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)
// Change values represent a detected change between two git trees. For
// modifications, From is the original status of the node and To is its
// final status. For insertions, From is the zero value and for
// deletions To is the zero value.
type Change struct {
From ChangeEntry
To ChangeEntry
}
var empty = ChangeEntry{}
// Action returns the kind of action represented by the change, an
// insertion, a deletion or a modification.
func (c *Change) Action() (merkletrie.Action, error) {
if c.From == empty && c.To == empty {
return merkletrie.Action(0),
fmt.Errorf("malformed change: empty from and to")
}
if c.From == empty {
return merkletrie.Insert, nil
}
if c.To == empty {
return merkletrie.Delete, nil
}
return merkletrie.Modify, nil
}
// Files returns the files before and after a change.
// For insertions, from will be nil; for deletions, to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()
if err != nil {
return
}
if action == merkletrie.Insert || action == merkletrie.Modify {
to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
if !c.To.TreeEntry.Mode.IsFile() {
return nil, nil, nil
}
if err != nil {
return
}
}
if action == merkletrie.Delete || action == merkletrie.Modify {
from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
if !c.From.TreeEntry.Mode.IsFile() {
return nil, nil, nil
}
if err != nil {
return
}
}
return
}
func (c *Change) String() string {
action, err := c.Action()
if err != nil {
return "malformed change"
}
return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())
}
// Patch returns a Patch with all the file changes in chunks. This
// representation can be used to create several diff outputs.
func (c *Change) Patch() (*Patch, error) {
return c.PatchContext(context.Background())
}
// PatchContext returns a Patch with all the file changes in chunks. This
// representation can be used to create several diff outputs.
// If the context expires, a non-nil error will be returned.
// The provided context must be non-nil.
func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
return getPatchContext(ctx, "", c)
}
func (c *Change) name() string {
if c.From != empty {
return c.From.Name
}
return c.To.Name
}
// ChangeEntry values represent a node that has suffered a change.
type ChangeEntry struct {
// Full path of the node using "/" as separator.
Name string
// Parent tree of the node that has changed.
Tree *Tree
// The entry of the node.
TreeEntry TreeEntry
}
// Changes represents a collection of changes between two git trees.
// Implements sort.Interface lexicographically over the path of the
// changed files.
type Changes []*Change
func (c Changes) Len() int {
return len(c)
}
func (c Changes) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}
func (c Changes) Less(i, j int) bool {
return strings.Compare(c[i].name(), c[j].name()) < 0
}
func (c Changes) String() string {
var buffer bytes.Buffer
buffer.WriteString("[")
comma := ""
for _, v := range c {
buffer.WriteString(comma)
buffer.WriteString(v.String())
comma = ", "
}
buffer.WriteString("]")
return buffer.String()
}
// Patch returns a Patch with all the changes in chunks. This
// representation can be used to create several diff outputs.
func (c Changes) Patch() (*Patch, error) {
return c.PatchContext(context.Background())
}
// PatchContext returns a Patch with all the changes in chunks. This
// representation can be used to create several diff outputs.
// If the context expires, a non-nil error will be returned.
// The provided context must be non-nil.
func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
return getPatchContext(ctx, "", c...)
}
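
For illustration (not part of the vendored file), a sketch of consuming a Changes value, e.g. one produced by DiffTree. It classifies each change with Action and reports which side carries a file.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// summarizeChanges walks an already-computed Changes value (an assumed input).
func summarizeChanges(changes object.Changes) error {
    for _, c := range changes {
        action, err := c.Action() // Insert, Delete or Modify
        if err != nil {
            return err
        }
        from, to, err := c.Files() // nil on the side that does not exist
        if err != nil {
            return err
        }
        fmt.Printf("%s: from=%v to=%v\n", action, from != nil, to != nil)
    }
    return nil
}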


@@ -0,0 +1,61 @@
package object
import (
"errors"
"fmt"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// The following functions transform change types from the merkletrie
// package into change types from this package.
func newChange(c merkletrie.Change) (*Change, error) {
ret := &Change{}
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
}
return ret, nil
}
func newChangeEntry(p noder.Path) (ChangeEntry, error) {
if p == nil {
return empty, nil
}
asTreeNoder, ok := p.Last().(*treeNoder)
if !ok {
return ChangeEntry{}, errors.New("cannot transform non-TreeNoders")
}
return ChangeEntry{
Name: p.String(),
Tree: asTreeNoder.parent,
TreeEntry: TreeEntry{
Name: asTreeNoder.name,
Mode: asTreeNoder.mode,
Hash: asTreeNoder.hash,
},
}, nil
}
func newChanges(src merkletrie.Changes) (Changes, error) {
ret := make(Changes, len(src))
var err error
for i, e := range src {
ret[i], err = newChange(e)
if err != nil {
return nil, fmt.Errorf("change #%d: %s", i, err)
}
}
return ret, nil
}


@@ -0,0 +1,415 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
// commit, a pointer to the previous commit(s), etc.
// http://shafiulazam.com/gitbook/1_the_git_object_model.html
type Commit struct {
// Hash of the commit object.
Hash plumbing.Hash
// Author is the original author of the commit.
Author Signature
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
Message string
// TreeHash is the hash of the root tree of the commit.
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
s storer.EncodedObjectStorer
}
// GetCommit gets a commit from an object storer and decodes it.
func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) {
o, err := s.EncodedObject(plumbing.CommitObject, h)
if err != nil {
return nil, err
}
return DecodeCommit(s, o)
}
// DecodeCommit decodes an encoded object into a *Commit and associates it to
// the given object storer.
func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) {
c := &Commit{s: s}
if err := c.Decode(o); err != nil {
return nil, err
}
return c, nil
}
// Tree returns the Tree from the commit.
func (c *Commit) Tree() (*Tree, error) {
return GetTree(c.s, c.TreeHash)
}
// PatchContext returns the Patch between the actual commit and the provided one.
// An error will be returned if the context expires. The provided context must be non-nil.
func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
fromTree, err := c.Tree()
if err != nil {
return nil, err
}
toTree, err := to.Tree()
if err != nil {
return nil, err
}
return fromTree.PatchContext(ctx, toTree)
}
// Patch returns the Patch between the actual commit and the provided one.
func (c *Commit) Patch(to *Commit) (*Patch, error) {
return c.PatchContext(context.Background(), to)
}
// Parents returns a CommitIter to the parent Commits.
func (c *Commit) Parents() CommitIter {
return NewCommitIter(c.s,
storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes),
)
}
// NumParents returns the number of parents in a commit.
func (c *Commit) NumParents() int {
return len(c.ParentHashes)
}
var ErrParentNotFound = errors.New("commit parent not found")
// Parent returns the ith parent of a commit.
func (c *Commit) Parent(i int) (*Commit, error) {
if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 {
return nil, ErrParentNotFound
}
return GetCommit(c.s, c.ParentHashes[i])
}
// File returns the file with the specified "path" in the commit and a
// nil error if the file exists. If the file does not exist, it returns
// a nil file and the ErrFileNotFound error.
func (c *Commit) File(path string) (*File, error) {
tree, err := c.Tree()
if err != nil {
return nil, err
}
return tree.File(path)
}
// Files returns a FileIter allowing iteration over the files in the commit's Tree.
func (c *Commit) Files() (*FileIter, error) {
tree, err := c.Tree()
if err != nil {
return nil, err
}
return tree.Files(), nil
}
// ID returns the object ID of the commit. The returned value will always match
// the current value of Commit.Hash.
//
// ID is present to fulfill the Object interface.
func (c *Commit) ID() plumbing.Hash {
return c.Hash
}
// Type returns the type of object. It always returns plumbing.CommitObject.
//
// Type is present to fulfill the Object interface.
func (c *Commit) Type() plumbing.ObjectType {
return plumbing.CommitObject
}
// Decode transforms a plumbing.EncodedObject into a Commit struct.
func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.CommitObject {
return ErrUnsupportedObject
}
c.Hash = o.Hash()
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
var message bool
var pgpsig bool
for {
line, err := r.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.PGPSignature += string(line)
continue
} else {
pgpsig = false
}
}
if !message {
line = bytes.TrimSpace(line)
if len(line) == 0 {
message = true
continue
}
split := bytes.SplitN(line, []byte{' '}, 2)
var data []byte
if len(split) == 2 {
data = split[1]
}
switch string(split[0]) {
case "tree":
c.TreeHash = plumbing.NewHash(string(data))
case "parent":
c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
case "author":
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
}
} else {
c.Message += string(line)
}
if err == io.EOF {
return nil
}
}
}
// Encode transforms a Commit into a plumbing.EncodedObject.
func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
}
func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil {
return err
}
for _, parent := range b.ParentHashes {
if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil {
return err
}
}
if _, err = fmt.Fprint(w, "author "); err != nil {
return err
}
if err = b.Author.Encode(w); err != nil {
return err
}
if _, err = fmt.Fprint(w, "\ncommitter "); err != nil {
return err
}
if err = b.Committer.Encode(w); err != nil {
return err
}
if b.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
}
// Split all the signature lines and re-write with a left padding and
// newline. Use join for this so it's clear that a newline should not be
// added after this section, as it will be added when the message is
// printed.
signature := strings.TrimSuffix(b.PGPSignature, "\n")
lines := strings.Split(signature, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil {
return err
}
return err
}
// Stats returns the stats of the commit, diffed against its first parent.
func (c *Commit) Stats() (FileStats, error) {
// Get the previous commit.
ci := c.Parents()
parentCommit, err := ci.Next()
if err != nil {
if err == io.EOF {
emptyNoder := treeNoder{}
parentCommit = &Commit{
Hash: emptyNoder.hash,
// TreeHash: emptyNoder.parent.Hash,
s: c.s,
}
} else {
return nil, err
}
}
patch, err := parentCommit.Patch(c)
if err != nil {
return nil, err
}
return getFileStatsFromFilePatches(patch.FilePatches()), nil
}
func (c *Commit) String() string {
return fmt.Sprintf(
"%s %s\nAuthor: %s\nDate: %s\n\n%s\n",
plumbing.CommitObject, c.Hash, c.Author.String(),
c.Author.When.Format(DateFormat), indent(c.Message),
)
}
// Verify performs PGP verification of the commit with a provided armored
// keyring and returns openpgp.Entity associated with verifying key on success.
func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
keyRingReader := strings.NewReader(armoredKeyRing)
keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
if err != nil {
return nil, err
}
// Extract signature.
signature := strings.NewReader(c.PGPSignature)
encoded := &plumbing.MemoryObject{}
// Encode commit components, excluding signature and get a reader object.
if err := c.encode(encoded, false); err != nil {
return nil, err
}
er, err := encoded.Reader()
if err != nil {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
}
func indent(t string) string {
var output []string
for _, line := range strings.Split(t, "\n") {
if len(line) != 0 {
line = " " + line
}
output = append(output, line)
}
return strings.Join(output, "\n")
}
// CommitIter is a generic closable interface for iterating over commits.
type CommitIter interface {
Next() (*Commit, error)
ForEach(func(*Commit) error) error
Close()
}
// storerCommitIter provides an iterator from commits in an EncodedObjectStorer.
type storerCommitIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewCommitIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a CommitIter that iterates over all
// commits contained in the storer.EncodedObjectIter.
//
// Any non-commit object returned by the storer.EncodedObjectIter is skipped.
func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter {
return &storerCommitIter{iter, s}
}
// Next moves the iterator to the next commit and returns a pointer to it. If
// there are no more commits, it returns io.EOF.
func (iter *storerCommitIter) Next() (*Commit, error) {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
return DecodeCommit(iter.s, obj)
}
// ForEach calls the cb function for each commit contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
c, err := DecodeCommit(iter.s, obj)
if err != nil {
return err
}
return cb(c)
})
}
func (iter *storerCommitIter) Close() {
iter.EncodedObjectIter.Close()
}
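
For illustration (not part of the vendored file), a sketch tying a few of the methods above together: load a commit, print it, and show its per-file stats. The storer and hash are assumed to come from an opened repository.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// describeCommit prints a commit and a diffstat against its first parent.
func describeCommit(s storer.EncodedObjectStorer, h plumbing.Hash) error {
    c, err := object.GetCommit(s, h)
    if err != nil {
        return err
    }
    fmt.Println(c) // Commit.String: hash, author, date and indented message

    stats, err := c.Stats() // diff against the first parent
    if err != nil {
        return err
    }
    fmt.Print(stats) // FileStats.String renders a diffstat-like histogram
    return nil
}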


@@ -0,0 +1,327 @@
package object
import (
"container/list"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage"
)
type commitPreIterator struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
stack []CommitIter
start *Commit
}
// NewCommitPreorderIter returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents in pre-order.
// The given callback will be called for each visited commit. Each commit will
// be visited only once. If the callback returns an error, walking will stop
// and will return the error. Other errors might be returned if the history
// cannot be traversed (e.g. missing objects). Ignore allows some commits to be
// skipped during iteration.
func NewCommitPreorderIter(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &commitPreIterator{
seenExternal: seenExternal,
seen: seen,
stack: make([]CommitIter, 0),
start: c,
}
}
func (w *commitPreIterator) Next() (*Commit, error) {
var c *Commit
for {
if w.start != nil {
c = w.start
w.start = nil
} else {
current := len(w.stack) - 1
if current < 0 {
return nil, io.EOF
}
var err error
c, err = w.stack[current].Next()
if err == io.EOF {
w.stack = w.stack[:current]
continue
}
if err != nil {
return nil, err
}
}
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
if c.NumParents() > 0 {
w.stack = append(w.stack, filteredParentIter(c, w.seen))
}
return c, nil
}
}
func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter {
var hashes []plumbing.Hash
for _, h := range c.ParentHashes {
if !seen[h] {
hashes = append(hashes, h)
}
}
return NewCommitIter(c.s,
storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes),
)
}
func (w *commitPreIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitPreIterator) Close() {}
type commitPostIterator struct {
stack []*Commit
seen map[plumbing.Hash]bool
}
// NewCommitPostorderIter returns a CommitIter that walks the commit
// history like WalkCommitHistory but in post-order. This means that after
// walking a merge commit, the merged commit will be walked before the base
// it was merged on. This can be useful if you wish to see the history in
// chronological order. Ignore allows some commits to be skipped during iteration.
func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &commitPostIterator{
stack: []*Commit{c},
seen: seen,
}
}
func (w *commitPostIterator) Next() (*Commit, error) {
for {
if len(w.stack) == 0 {
return nil, io.EOF
}
c := w.stack[len(w.stack)-1]
w.stack = w.stack[:len(w.stack)-1]
if w.seen[c.Hash] {
continue
}
w.seen[c.Hash] = true
return c, c.Parents().ForEach(func(p *Commit) error {
w.stack = append(w.stack, p)
return nil
})
}
}
func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitPostIterator) Close() {}
// commitAllIterator is a commit iterator over all refs.
type commitAllIterator struct {
// currCommit points to the current commit.
currCommit *list.Element
}
// NewCommitAllIter returns a new commit iterator for all refs.
// repoStorer is a repo Storer used to get commits and references.
// commitIterFunc is a commit iterator function, used to iterate through ref commits in the chosen order.
func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
commitsPath := list.New()
commitsLookup := make(map[plumbing.Hash]*list.Element)
head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
if err == nil {
err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
}
if err != nil && err != plumbing.ErrReferenceNotFound {
return nil, err
}
// add all references along with the HEAD
refIter, err := repoStorer.IterReferences()
if err != nil {
return nil, err
}
defer refIter.Close()
for {
ref, err := refIter.Next()
if err == io.EOF {
break
}
if err == plumbing.ErrReferenceNotFound {
continue
}
if err != nil {
return nil, err
}
if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
return nil, err
}
}
return &commitAllIterator{commitsPath.Front()}, nil
}
func addReference(
repoStorer storage.Storer,
commitIterFunc func(*Commit) CommitIter,
ref *plumbing.Reference,
commitsPath *list.List,
commitsLookup map[plumbing.Hash]*list.Element) error {
_, exists := commitsLookup[ref.Hash()]
if exists {
// we already have it - skip the reference.
return nil
}
refCommit, _ := GetCommit(repoStorer, ref.Hash())
if refCommit == nil {
// if it's not a commit - skip it.
return nil
}
var (
refCommits []*Commit
parent *list.Element
)
// collect all ref commits to add
commitIter := commitIterFunc(refCommit)
for c, e := commitIter.Next(); e == nil; {
parent, exists = commitsLookup[c.Hash]
if exists {
break
}
refCommits = append(refCommits, c)
c, e = commitIter.Next()
}
commitIter.Close()
if parent == nil {
// Common parent not found: add all commits to the path from this ref
// (maybe it's a HEAD and we don't have anything yet).
for _, c := range refCommits {
parent = commitsPath.PushBack(c)
commitsLookup[c.Hash] = parent
}
} else {
// add ref's commits to the path in reverse order (from the latest)
for i := len(refCommits) - 1; i >= 0; i-- {
c := refCommits[i]
// insert before found common parent
parent = commitsPath.InsertBefore(c, parent)
commitsLookup[c.Hash] = parent
}
}
return nil
}
func (it *commitAllIterator) Next() (*Commit, error) {
if it.currCommit == nil {
return nil, io.EOF
}
c := it.currCommit.Value.(*Commit)
it.currCommit = it.currCommit.Next()
return c, nil
}
func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := it.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (it *commitAllIterator) Close() {
it.currCommit = nil
}
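
For illustration (not part of the vendored file), a sketch of walking history with the pre-order iterator defined above; the starting commit is an assumed input (e.g. the commit a branch head points to).

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// walkHistory visits start and its ancestors once each, in pre-order,
// skipping any hashes listed in ignore.
func walkHistory(start *object.Commit, ignore []plumbing.Hash) error {
    iter := object.NewCommitPreorderIter(start, nil, ignore)
    defer iter.Close()
    return iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Hash, c.Author.When.Format(object.DateFormat))
        return nil
    })
}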


@@ -0,0 +1,100 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type bfsCommitIterator struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
queue []*Commit
}
// NewCommitIterBSF returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents in breadth-first order.
// The given callback will be called for each visited commit. Each commit will
// be visited only once. If the callback returns an error, walking will stop
// and will return the error. Other errors might be returned if the history
// cannot be traversed (e.g. missing objects). Ignore allows some commits to be
// skipped during iteration.
func NewCommitIterBSF(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &bfsCommitIterator{
seenExternal: seenExternal,
seen: seen,
queue: []*Commit{c},
}
}
func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error {
if w.seen[h] || w.seenExternal[h] {
return nil
}
c, err := GetCommit(store, h)
if err != nil {
return err
}
w.queue = append(w.queue, c)
return nil
}
func (w *bfsCommitIterator) Next() (*Commit, error) {
var c *Commit
for {
if len(w.queue) == 0 {
return nil, io.EOF
}
c = w.queue[0]
w.queue = w.queue[1:]
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
for _, h := range c.ParentHashes {
err := w.appendHash(c.s, h)
if err != nil {
return nil, err
}
}
return c, nil
}
}
func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *bfsCommitIterator) Close() {}


@@ -0,0 +1,103 @@
package object
import (
"io"
"github.com/emirpasic/gods/trees/binaryheap"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type commitIteratorByCTime struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
heap *binaryheap.Heap
}
// NewCommitIterCTime returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents while preserving Committer Time order.
// This appears to be the closest order to `git log`.
// The given callback will be called for each visited commit. Each commit will
// be visited only once. If the callback returns an error, walking will stop
// and will return the error. Other errors might be returned if the history
// cannot be traversed (e.g. missing objects). Ignore allows some commits to be
// skipped during iteration.
func NewCommitIterCTime(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
heap := binaryheap.NewWith(func(a, b interface{}) int {
if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) {
return 1
}
return -1
})
heap.Push(c)
return &commitIteratorByCTime{
seenExternal: seenExternal,
seen: seen,
heap: heap,
}
}
func (w *commitIteratorByCTime) Next() (*Commit, error) {
var c *Commit
for {
cIn, ok := w.heap.Pop()
if !ok {
return nil, io.EOF
}
c = cIn.(*Commit)
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
for _, h := range c.ParentHashes {
if w.seen[h] || w.seenExternal[h] {
continue
}
pc, err := GetCommit(c.s, h)
if err != nil {
return nil, err
}
w.heap.Push(pc)
}
return c, nil
}
}
func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitIteratorByCTime) Close() {}
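
For illustration (not part of the vendored file), a sketch using the committer-time iterator, which the comment above describes as the closest order to `git log`; the head commit is an assumed input.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// logByCommitterTime prints at most limit commits, newest committer time first.
func logByCommitterTime(head *object.Commit, limit int) error {
    iter := object.NewCommitIterCTime(head, nil, nil)
    defer iter.Close()
    n := 0
    return iter.ForEach(func(c *object.Commit) error {
        if n >= limit {
            return storer.ErrStop // stops iteration without reporting an error
        }
        n++
        fmt.Println(c.Hash)
        return nil
    })
}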


@@ -0,0 +1,145 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type commitFileIter struct {
fileName string
sourceIter CommitIter
currentCommit *Commit
checkParent bool
}
// NewCommitFileIterFromIter returns a commit iterator that performs a diff-tree between
// successive trees returned by the given commit iterator. The purpose of this is
// to find the commits that explain how the files matching the path came to be.
// If checkParent is true, the function double-checks whether the potential parent (the next
// commit in the path) is one of the parents in the tree (this is used by `git log --all`).
func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
iterator := new(commitFileIter)
iterator.sourceIter = commitIter
iterator.fileName = fileName
iterator.checkParent = checkParent
return iterator
}
func (c *commitFileIter) Next() (*Commit, error) {
if c.currentCommit == nil {
var err error
c.currentCommit, err = c.sourceIter.Next()
if err != nil {
return nil, err
}
}
commit, commitErr := c.getNextFileCommit()
// Setting current-commit to nil to prevent unwanted states when errors are raised
if commitErr != nil {
c.currentCommit = nil
}
return commit, commitErr
}
func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
for {
// Parent-commit can be nil if the current-commit is the initial commit
parentCommit, parentCommitErr := c.sourceIter.Next()
if parentCommitErr != nil {
// If the parent-commit is beyond the initial commit, keep it nil
if parentCommitErr != io.EOF {
return nil, parentCommitErr
}
parentCommit = nil
}
// Fetch the trees of the current and parent commits
currentTree, currTreeErr := c.currentCommit.Tree()
if currTreeErr != nil {
return nil, currTreeErr
}
var parentTree *Tree
if parentCommit != nil {
var parentTreeErr error
parentTree, parentTreeErr = parentCommit.Tree()
if parentTreeErr != nil {
return nil, parentTreeErr
}
}
// Find diff between current and parent trees
changes, diffErr := DiffTree(currentTree, parentTree)
if diffErr != nil {
return nil, diffErr
}
found := c.hasFileChange(changes, parentCommit)
// Store the current-commit in case a change is found, and
// update the current-commit for the next iteration.
prevCommit := c.currentCommit
c.currentCommit = parentCommit
if found {
return prevCommit, nil
}
// If no match is found and the parent-commit is nil (past the initial commit), return io.EOF.
if parentCommit == nil {
return nil, io.EOF
}
}
}
func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
for _, change := range changes {
if change.name() != c.fileName {
continue
}
// filename matches, now check if source iterator contains all commits (from all refs)
if c.checkParent {
if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
return true
}
continue
}
return true
}
return false
}
func isParentHash(hash plumbing.Hash, commit *Commit) bool {
for _, h := range commit.ParentHashes {
if h == hash {
return true
}
}
return false
}
func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
for {
commit, nextErr := c.Next()
if nextErr != nil {
return nextErr
}
err := cb(commit)
if err == storer.ErrStop {
return nil
} else if err != nil {
return err
}
}
}
func (c *commitFileIter) Close() {
c.sourceIter.Close()
}
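
For illustration (not part of the vendored file), a sketch of listing the commits that changed one file, combining the pre-order walker with the file iterator above; the head commit is an assumed input and checkParent is left false.

package example

import (
    "fmt"
    "io"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// fileHistory prints the commits in which fileName changed, newest first.
func fileHistory(head *object.Commit, fileName string) error {
    source := object.NewCommitPreorderIter(head, nil, nil)
    iter := object.NewCommitFileIterFromIter(fileName, source, false)
    defer iter.Close()
    err := iter.ForEach(func(c *object.Commit) error {
        fmt.Println(c.Hash, c.Author.When)
        return nil
    })
    if err == io.EOF { // this iterator surfaces io.EOF when history is exhausted
        return nil
    }
    return err
}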


@@ -0,0 +1,37 @@
package object
import (
"bytes"
"context"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// DiffTree compares the content and mode of the blobs found via two
// tree objects.
func DiffTree(a, b *Tree) (Changes, error) {
return DiffTreeContext(context.Background(), a, b)
}
// DiffTreeContext compares the content and mode of the blobs found via two
// tree objects. The provided context must be non-nil.
// An error will be returned if the context expires.
func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
from := NewTreeRootNode(a)
to := NewTreeRootNode(b)
hashEqual := func(a, b noder.Hasher) bool {
return bytes.Equal(a.Hash(), b.Hash())
}
merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
if err != nil {
if err == merkletrie.ErrCanceled {
return nil, ErrCanceled
}
return nil, err
}
return newChanges(merkletrieChanges)
}
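
For illustration (not part of the vendored file), a sketch that diffs the trees of two commits (assumed inputs) and prints the resulting changes.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// diffCommits prints the tree-level changes needed to go from one commit to another.
func diffCommits(from, to *object.Commit) error {
    fromTree, err := from.Tree()
    if err != nil {
        return err
    }
    toTree, err := to.Tree()
    if err != nil {
        return err
    }
    changes, err := object.DiffTree(fromTree, toTree)
    if err != nil {
        return err
    }
    fmt.Println(changes) // Changes.String: "[<Action: ..., Path: ...>, ...]"
    return nil
}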

vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go generated vendored Normal file

@@ -0,0 +1,137 @@
package object
import (
"bytes"
"io"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/binary"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// File represents git file objects.
type File struct {
// Name is the path of the file. It might be relative to a tree,
// depending on the function that generates it.
Name string
// Mode is the file mode.
Mode filemode.FileMode
// Blob with the contents of the file.
Blob
}
// NewFile returns a File based on the given blob object
func NewFile(name string, m filemode.FileMode, b *Blob) *File {
return &File{Name: name, Mode: m, Blob: *b}
}
// Contents returns the contents of a file as a string.
func (f *File) Contents() (content string, err error) {
reader, err := f.Reader()
if err != nil {
return "", err
}
defer ioutil.CheckClose(reader, &err)
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(reader); err != nil {
return "", err
}
return buf.String(), nil
}
// IsBinary reports whether the file is binary.
func (f *File) IsBinary() (bin bool, err error) {
reader, err := f.Reader()
if err != nil {
return false, err
}
defer ioutil.CheckClose(reader, &err)
return binary.IsBinary(reader)
}
// Lines returns a slice of lines from the contents of a file, stripping
// all end of line characters. If the last line is empty (does not end
// in an end of line), it is also stripped.
func (f *File) Lines() ([]string, error) {
content, err := f.Contents()
if err != nil {
return nil, err
}
splits := strings.Split(content, "\n")
// remove the last line if it is empty
if splits[len(splits)-1] == "" {
return splits[:len(splits)-1], nil
}
return splits, nil
}
// FileIter provides an iterator for the files in a tree.
type FileIter struct {
s storer.EncodedObjectStorer
w TreeWalker
}
// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a
// *FileIter that iterates over all files contained in the tree, recursively.
func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter {
return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)}
}
// Next moves the iterator to the next file and returns a pointer to it. If
// there are no more files, it returns io.EOF.
func (iter *FileIter) Next() (*File, error) {
for {
name, entry, err := iter.w.Next()
if err != nil {
return nil, err
}
if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
continue
}
blob, err := GetBlob(iter.s, entry.Hash)
if err != nil {
return nil, err
}
return NewFile(name, entry.Mode, blob), nil
}
}
// ForEach calls the cb function for each file contained in this iter until
// an error happens or the end of the iter is reached. If storer.ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *FileIter) ForEach(cb func(*File) error) error {
defer iter.Close()
for {
f, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := cb(f); err != nil {
if err == storer.ErrStop {
return nil
}
return err
}
}
}
func (iter *FileIter) Close() {
iter.w.Close()
}
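
For illustration (not part of the vendored file), a sketch that reads one file out of a commit and prints it line by line; the commit and path are assumed inputs.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// printFile prints the numbered lines of path as stored in commit c.
func printFile(c *object.Commit, path string) error {
    f, err := c.File(path) // ErrFileNotFound if the path is absent
    if err != nil {
        return err
    }
    if bin, err := f.IsBinary(); err != nil || bin {
        return err // skip binary files (err is nil when the file is simply binary)
    }
    lines, err := f.Lines()
    if err != nil {
        return err
    }
    for i, l := range lines {
        fmt.Printf("%4d %s\n", i+1, l)
    }
    return nil
}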


@@ -0,0 +1,237 @@
// Package object contains implementations of all Git objects and utility
// functions to work with them.
package object
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
// ErrUnsupportedObject is returned when a non-supported object is being decoded.
var ErrUnsupportedObject = errors.New("unsupported object type")
// Object is a generic representation of any git object. It is implemented by
// Commit, Tree, Blob, and Tag, and includes the functions that are common to
// them.
//
// Object is returned when an object can be of any type. It is frequently used
// with a type cast to acquire the specific type of object:
//
// func process(obj Object) {
// switch o := obj.(type) {
// case *Commit:
// // o is a Commit
// case *Tree:
// // o is a Tree
// case *Blob:
// // o is a Blob
// case *Tag:
// // o is a Tag
// }
// }
//
// This interface is intentionally different from plumbing.EncodedObject, which
// is a lower level interface used by storage implementations to read and write
// objects in its encoded form.
type Object interface {
ID() plumbing.Hash
Type() plumbing.ObjectType
Decode(plumbing.EncodedObject) error
Encode(plumbing.EncodedObject) error
}
// GetObject gets an object from an object storer and decodes it.
func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) {
o, err := s.EncodedObject(plumbing.AnyObject, h)
if err != nil {
return nil, err
}
return DecodeObject(s, o)
}
// DecodeObject decodes an encoded object into an Object and associates it to
// the given object storer.
func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) {
switch o.Type() {
case plumbing.CommitObject:
return DecodeCommit(s, o)
case plumbing.TreeObject:
return DecodeTree(s, o)
case plumbing.BlobObject:
return DecodeBlob(o)
case plumbing.TagObject:
return DecodeTag(s, o)
default:
return nil, plumbing.ErrInvalidType
}
}
// DateFormat is the format being used in the original git implementation
const DateFormat = "Mon Jan 02 15:04:05 2006 -0700"
// Signature identifies who created a commit or tag, and when.
type Signature struct {
// Name represents a person name. It is an arbitrary string.
Name string
// Email is an email, but it cannot be assumed to be well-formed.
Email string
// When is the timestamp of the signature.
When time.Time
}
// Decode decodes a byte slice into a signature
func (s *Signature) Decode(b []byte) {
open := bytes.LastIndexByte(b, '<')
close := bytes.LastIndexByte(b, '>')
if open == -1 || close == -1 {
return
}
if close < open {
return
}
s.Name = string(bytes.Trim(b[:open], " "))
s.Email = string(b[open+1 : close])
hasTime := close+2 < len(b)
if hasTime {
s.decodeTimeAndTimeZone(b[close+2:])
}
}
// Encode encodes a Signature into a writer.
func (s *Signature) Encode(w io.Writer) error {
if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil {
return err
}
if err := s.encodeTimeAndTimeZone(w); err != nil {
return err
}
return nil
}
var timeZoneLength = 5
func (s *Signature) decodeTimeAndTimeZone(b []byte) {
space := bytes.IndexByte(b, ' ')
if space == -1 {
space = len(b)
}
ts, err := strconv.ParseInt(string(b[:space]), 10, 64)
if err != nil {
return
}
s.When = time.Unix(ts, 0).In(time.UTC)
var tzStart = space + 1
if tzStart >= len(b) || tzStart+timeZoneLength > len(b) {
return
}
// Include a dummy year in this time.Parse() call to avoid a bug in Go:
// https://github.com/golang/go/issues/19750
//
// Parsing the timezone with no other details causes the tl.Location() call
// below to return time.Local instead of the parsed zone in some cases
tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength]))
if err != nil {
return
}
s.When = s.When.In(tl.Location())
}
func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
u := s.When.Unix()
if u < 0 {
u = 0
}
_, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
return err
}
func (s *Signature) String() string {
return fmt.Sprintf("%s <%s>", s.Name, s.Email)
}
// ObjectIter provides an iterator for a set of objects.
type ObjectIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewObjectIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all
// objects contained in the storer.EncodedObjectIter.
func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter {
return &ObjectIter{iter, s}
}
// Next moves the iterator to the next object and returns a pointer to it. If
// there are no more objects, it returns io.EOF.
func (iter *ObjectIter) Next() (Object, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
o, err := iter.toObject(obj)
if err == plumbing.ErrInvalidType {
continue
}
if err != nil {
return nil, err
}
return o, nil
}
}
// ForEach calls the cb function for each object contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *ObjectIter) ForEach(cb func(Object) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
o, err := iter.toObject(obj)
if err == plumbing.ErrInvalidType {
return nil
}
if err != nil {
return err
}
return cb(o)
})
}
func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) {
switch obj.Type() {
case plumbing.BlobObject:
blob := &Blob{}
return blob, blob.Decode(obj)
case plumbing.TreeObject:
tree := &Tree{s: iter.s}
return tree, tree.Decode(obj)
case plumbing.CommitObject:
commit := &Commit{}
return commit, commit.Decode(obj)
case plumbing.TagObject:
tag := &Tag{}
return tag, tag.Decode(obj)
default:
return nil, plumbing.ErrInvalidType
}
}
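
For illustration (not part of the vendored file), a sketch mirroring the type-switch pattern from the Object documentation above; the storer and hash are assumed inputs.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/object"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// describeObject returns a one-line description of the object at h.
func describeObject(s storer.EncodedObjectStorer, h plumbing.Hash) (string, error) {
    obj, err := object.GetObject(s, h)
    if err != nil {
        return "", err
    }
    switch o := obj.(type) {
    case *object.Commit:
        return "commit: " + o.Message, nil
    case *object.Tree:
        return fmt.Sprintf("tree with %d entries", len(o.Entries)), nil
    case *object.Blob:
        return fmt.Sprintf("blob of %d bytes", o.Size), nil
    case *object.Tag:
        return "tag: " + o.Name, nil
    default:
        return "", plumbing.ErrInvalidType
    }
}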


@@ -0,0 +1,335 @@
package object
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
"gopkg.in/src-d/go-git.v4/utils/diff"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
var (
ErrCanceled = errors.New("operation canceled")
)
func getPatch(message string, changes ...*Change) (*Patch, error) {
ctx := context.Background()
return getPatchContext(ctx, message, changes...)
}
func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
var filePatches []fdiff.FilePatch
for _, c := range changes {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
fp, err := filePatchWithContext(ctx, c)
if err != nil {
return nil, err
}
filePatches = append(filePatches, fp)
}
return &Patch{message, filePatches}, nil
}
func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
from, to, err := c.Files()
if err != nil {
return nil, err
}
fromContent, fIsBinary, err := fileContent(from)
if err != nil {
return nil, err
}
toContent, tIsBinary, err := fileContent(to)
if err != nil {
return nil, err
}
if fIsBinary || tIsBinary {
return &textFilePatch{from: c.From, to: c.To}, nil
}
diffs := diff.Do(fromContent, toContent)
var chunks []fdiff.Chunk
for _, d := range diffs {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
var op fdiff.Operation
switch d.Type {
case dmp.DiffEqual:
op = fdiff.Equal
case dmp.DiffDelete:
op = fdiff.Delete
case dmp.DiffInsert:
op = fdiff.Add
}
chunks = append(chunks, &textChunk{d.Text, op})
}
return &textFilePatch{
chunks: chunks,
from: c.From,
to: c.To,
}, nil
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
}
isBinary, err = f.IsBinary()
if err != nil || isBinary {
return
}
content, err = f.Contents()
return
}
// Patch is an implementation of the fdiff.Patch interface
type Patch struct {
message string
filePatches []fdiff.FilePatch
}
func (t *Patch) FilePatches() []fdiff.FilePatch {
return t.filePatches
}
func (t *Patch) Message() string {
return t.message
}
func (p *Patch) Encode(w io.Writer) error {
ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
return ue.Encode(p)
}
func (p *Patch) Stats() FileStats {
return getFileStatsFromFilePatches(p.FilePatches())
}
func (p *Patch) String() string {
buf := bytes.NewBuffer(nil)
err := p.Encode(buf)
if err != nil {
return fmt.Sprintf("malformed patch: %s", err.Error())
}
return buf.String()
}
// changeEntryWrapper is an implementation of fdiff.File interface
type changeEntryWrapper struct {
ce ChangeEntry
}
func (f *changeEntryWrapper) Hash() plumbing.Hash {
if !f.ce.TreeEntry.Mode.IsFile() {
return plumbing.ZeroHash
}
return f.ce.TreeEntry.Hash
}
func (f *changeEntryWrapper) Mode() filemode.FileMode {
return f.ce.TreeEntry.Mode
}
func (f *changeEntryWrapper) Path() string {
if !f.ce.TreeEntry.Mode.IsFile() {
return ""
}
return f.ce.Name
}
func (f *changeEntryWrapper) Empty() bool {
return !f.ce.TreeEntry.Mode.IsFile()
}
// textFilePatch is an implementation of fdiff.FilePatch interface
type textFilePatch struct {
chunks []fdiff.Chunk
from, to ChangeEntry
}
func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
f := &changeEntryWrapper{tf.from}
t := &changeEntryWrapper{tf.to}
if !f.Empty() {
from = f
}
if !t.Empty() {
to = t
}
return
}
func (t *textFilePatch) IsBinary() bool {
return len(t.chunks) == 0
}
func (t *textFilePatch) Chunks() []fdiff.Chunk {
return t.chunks
}
// textChunk is an implementation of fdiff.Chunk interface
type textChunk struct {
content string
op fdiff.Operation
}
func (t *textChunk) Content() string {
return t.content
}
func (t *textChunk) Type() fdiff.Operation {
return t.op
}
// FileStat stores the status of changes in content of a file.
type FileStat struct {
Name string
Addition int
Deletion int
}
func (fs FileStat) String() string {
return printStat([]FileStat{fs})
}
// FileStats is a collection of FileStat.
type FileStats []FileStat
func (fileStats FileStats) String() string {
return printStat(fileStats)
}
func printStat(fileStats []FileStat) string {
padLength := float64(len(" "))
newlineLength := float64(len("\n"))
separatorLength := float64(len("|"))
// Soft line length limit. The text length calculation below excludes
// length of the change number. Adding that would take it closer to 80,
// but probably not more than 80, until it's a huge number.
lineLength := 72.0
// Get the longest filename and longest total change.
var longestLength float64
var longestTotalChange float64
for _, fs := range fileStats {
if int(longestLength) < len(fs.Name) {
longestLength = float64(len(fs.Name))
}
totalChange := fs.Addition + fs.Deletion
if int(longestTotalChange) < totalChange {
longestTotalChange = float64(totalChange)
}
}
// Parts of the output:
// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
// example: " main.go | 10 +++++++--- "
// <pad><filename><pad>
leftTextLength := padLength + longestLength + padLength
// <pad><number><pad><+++++/-----><newline>
// Excluding number length here.
rightTextLength := padLength + padLength + newlineLength
totalTextArea := leftTextLength + separatorLength + rightTextLength
heightOfHistogram := lineLength - totalTextArea
// Scale the histogram.
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
scaleFactor = float64(longestTotalChange / heightOfHistogram)
} else {
scaleFactor = 1.0
}
finalOutput := ""
for _, fs := range fileStats {
addn := float64(fs.Addition)
deln := float64(fs.Deletion)
adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
}
return finalOutput
}
func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
var fileStats FileStats
for _, fp := range filePatches {
// ignore empty patches (binary files, submodule refs updates)
if len(fp.Chunks()) == 0 {
continue
}
cs := FileStat{}
from, to := fp.Files()
if from == nil {
// New File is created.
cs.Name = to.Path()
} else if to == nil {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
// File is renamed. Not supported.
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}
for _, chunk := range fp.Chunks() {
switch chunk.Type() {
case fdiff.Add:
cs.Addition += strings.Count(chunk.Content(), "\n")
case fdiff.Delete:
cs.Deletion += strings.Count(chunk.Content(), "\n")
}
}
fileStats = append(fileStats, cs)
}
return fileStats
}
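
For illustration (not part of the vendored file), a sketch that renders a unified diff and a diffstat between two commits (assumed inputs), using the Patch type above.

package example

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/object"
)

// showPatch prints the unified diff from one commit to another, then its stats.
func showPatch(from, to *object.Commit) error {
    patch, err := from.Patch(to)
    if err != nil {
        return err
    }
    fmt.Print(patch.String()) // unified diff, rendered by the fdiff encoder
    fmt.Print(patch.Stats())  // per-file additions/deletions histogram
    return nil
}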

vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go generated vendored Normal file

@@ -0,0 +1,350 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// Tag represents an annotated tag object. It points to a single git object of
// any type, but tags typically are applied to commit or blob objects. It
// provides a reference that associates the target with a tag name. It also
// contains meta-information about the tag, including the tagger, tag date and
// message.
//
// Note that this is not used for lightweight tags.
//
// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
type Tag struct {
// Hash of the tag.
Hash plumbing.Hash
// Name of the tag.
Name string
// Tagger is the one who created the tag.
Tagger Signature
// Message is an arbitrary text message.
Message string
// PGPSignature is the PGP signature of the tag.
PGPSignature string
// TargetType is the object type of the target.
TargetType plumbing.ObjectType
// Target is the hash of the target object.
Target plumbing.Hash
s storer.EncodedObjectStorer
}
// GetTag gets a tag from an object storer and decodes it.
func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
o, err := s.EncodedObject(plumbing.TagObject, h)
if err != nil {
return nil, err
}
return DecodeTag(s, o)
}
// DecodeTag decodes an encoded object into a *Tag and associates it to the
// given object storer.
func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
t := &Tag{s: s}
if err := t.Decode(o); err != nil {
return nil, err
}
return t, nil
}
// ID returns the object ID of the tag, not the object that the tag references.
// The returned value will always match the current value of Tag.Hash.
//
// ID is present to fulfill the Object interface.
func (t *Tag) ID() plumbing.Hash {
return t.Hash
}
// Type returns the type of object. It always returns plumbing.TagObject.
//
// Type is present to fulfill the Object interface.
func (t *Tag) Type() plumbing.ObjectType {
return plumbing.TagObject
}
// Decode transforms a plumbing.EncodedObject into a Tag struct.
func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.TagObject {
return ErrUnsupportedObject
}
t.Hash = o.Hash()
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
for {
var line []byte
line, err = r.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break // Start of message
}
split := bytes.SplitN(line, []byte{' '}, 2)
switch string(split[0]) {
case "object":
t.Target = plumbing.NewHash(string(split[1]))
case "type":
t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
if err != nil {
return err
}
case "tag":
t.Name = string(split[1])
case "tagger":
t.Tagger.Decode(split[1])
}
if err == io.EOF {
return nil
}
}
data, err := stdioutil.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
pgpsig = false
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
}
return nil
}
// Encode transforms a Tag into a plumbing.EncodedObject.
func (t *Tag) Encode(o plumbing.EncodedObject) error {
return t.encode(o, true)
}
func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.TagObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
if _, err = fmt.Fprintf(w,
"object %s\ntype %s\ntag %s\ntagger ",
t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
return err
}
if err = t.Tagger.Encode(w); err != nil {
return err
}
if _, err = fmt.Fprint(w, "\n\n"); err != nil {
return err
}
if _, err = fmt.Fprint(w, t.Message); err != nil {
return err
}
// Note that this is highly sensitive to what is sent along in the message.
// The message *always* needs to end with a newline, or else the message and the
// signature will be concatenated into a corrupt object. Since this is a
// lower-level method, we assume you know what you are doing and have already
// prepared the message accordingly in the caller.
if includeSig {
if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
return err
}
}
return err
}
// Commit returns the commit pointed to by the tag. If the tag points to a
// different type of object ErrUnsupportedObject will be returned.
func (t *Tag) Commit() (*Commit, error) {
if t.TargetType != plumbing.CommitObject {
return nil, ErrUnsupportedObject
}
o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target)
if err != nil {
return nil, err
}
return DecodeCommit(t.s, o)
}
// Tree returns the tree pointed to by the tag. If the tag points to a commit
// object the tree of that commit will be returned. If the tag does not point
// to a commit or tree object ErrUnsupportedObject will be returned.
func (t *Tag) Tree() (*Tree, error) {
switch t.TargetType {
case plumbing.CommitObject:
c, err := t.Commit()
if err != nil {
return nil, err
}
return c.Tree()
case plumbing.TreeObject:
return GetTree(t.s, t.Target)
default:
return nil, ErrUnsupportedObject
}
}
// Blob returns the blob pointed to by the tag. If the tag points to a
// different type of object ErrUnsupportedObject will be returned.
func (t *Tag) Blob() (*Blob, error) {
if t.TargetType != plumbing.BlobObject {
return nil, ErrUnsupportedObject
}
return GetBlob(t.s, t.Target)
}
// Object returns the object pointed to by the tag.
func (t *Tag) Object() (Object, error) {
o, err := t.s.EncodedObject(t.TargetType, t.Target)
if err != nil {
return nil, err
}
return DecodeObject(t.s, o)
}
// String returns the meta information contained in the tag as a formatted
// string.
func (t *Tag) String() string {
obj, _ := t.Object()
return fmt.Sprintf(
"%s %s\nTagger: %s\nDate: %s\n\n%s\n%s",
plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat),
t.Message, objectAsString(obj),
)
}
// Verify performs PGP verification of the tag with a provided armored
// keyring and returns openpgp.Entity associated with verifying key on success.
func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
keyRingReader := strings.NewReader(armoredKeyRing)
keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
if err != nil {
return nil, err
}
// Extract signature.
signature := strings.NewReader(t.PGPSignature)
encoded := &plumbing.MemoryObject{}
// Encode tag components, excluding signature and get a reader object.
if err := t.encode(encoded, false); err != nil {
return nil, err
}
er, err := encoded.Reader()
if err != nil {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
}
// TagIter provides an iterator for a set of tags.
type TagIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewTagIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *TagIter that iterates over all
// tags contained in the storer.EncodedObjectIter.
//
// Any non-tag object returned by the storer.EncodedObjectIter is skipped.
func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter {
return &TagIter{iter, s}
}
// Next moves the iterator to the next tag and returns a pointer to it. If
// there are no more tags, it returns io.EOF.
func (iter *TagIter) Next() (*Tag, error) {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
return DecodeTag(iter.s, obj)
}
// ForEach calls the cb function for each tag contained in this iter until
// an error occurs or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *TagIter) ForEach(cb func(*Tag) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
t, err := DecodeTag(iter.s, obj)
if err != nil {
return err
}
return cb(t)
})
}
func objectAsString(obj Object) string {
switch o := obj.(type) {
case *Commit:
return o.String()
default:
return ""
}
}
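// Usage sketch (illustrative, not part of the original file): list every
// annotated tag held by an object storer, using TagIter. The storer is
// assumed to come from a repository (e.g. its Storer field).
func listTagsSketch(s storer.EncodedObjectStorer) error {
	encIter, err := s.IterEncodedObjects(plumbing.TagObject)
	if err != nil {
		return err
	}
	iter := NewTagIter(s, encIter)
	return iter.ForEach(func(t *Tag) error {
		fmt.Printf("%s -> %s\n", t.Name, t.Target)
		return nil
	})
}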

511
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go generated vendored Normal file
View File

@ -0,0 +1,511 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
maxTreeDepth = 1024
startingStackSize = 8
)
// New errors defined by this package.
var (
ErrMaxTreeDepth = errors.New("maximum tree depth exceeded")
ErrFileNotFound = errors.New("file not found")
ErrDirectoryNotFound = errors.New("directory not found")
ErrEntryNotFound = errors.New("entry not found")
)
// Tree is basically like a directory: it references a bunch of other trees
// and/or blobs (i.e. files and sub-directories).
type Tree struct {
Entries []TreeEntry
Hash plumbing.Hash
s storer.EncodedObjectStorer
m map[string]*TreeEntry
t map[string]*Tree // tree path cache
}
// GetTree gets a tree from an object storer and decodes it.
func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) {
o, err := s.EncodedObject(plumbing.TreeObject, h)
if err != nil {
return nil, err
}
return DecodeTree(s, o)
}
// DecodeTree decodes an encoded object into a *Tree and associates it to the
// given object storer.
func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) {
t := &Tree{s: s}
if err := t.Decode(o); err != nil {
return nil, err
}
return t, nil
}
// TreeEntry represents a single entry in a Tree (e.g. a file or a sub-tree).
type TreeEntry struct {
Name string
Mode filemode.FileMode
Hash plumbing.Hash
}
// File returns the *File identified by the `path` argument.
// The path is interpreted as relative to the tree receiver.
func (t *Tree) File(path string) (*File, error) {
e, err := t.FindEntry(path)
if err != nil {
return nil, ErrFileNotFound
}
blob, err := GetBlob(t.s, e.Hash)
if err != nil {
if err == plumbing.ErrObjectNotFound {
return nil, ErrFileNotFound
}
return nil, err
}
return NewFile(path, e.Mode, blob), nil
}
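// Usage sketch (illustrative, not part of the original file): read the
// contents of a file stored somewhere under this tree, e.g. a tree
// obtained from Commit.Tree.
func readFileSketch(t *Tree, name string) (string, error) {
	f, err := t.File(name)
	if err != nil {
		return "", err
	}
	return f.Contents()
}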
// Size returns the plaintext size of an object, without reading it
// into memory.
func (t *Tree) Size(path string) (int64, error) {
e, err := t.FindEntry(path)
if err != nil {
return 0, ErrEntryNotFound
}
return t.s.EncodedObjectSize(e.Hash)
}
// Tree returns the tree identified by the `path` argument.
// The path is interpreted as relative to the tree receiver.
func (t *Tree) Tree(path string) (*Tree, error) {
e, err := t.FindEntry(path)
if err != nil {
return nil, ErrDirectoryNotFound
}
tree, err := GetTree(t.s, e.Hash)
if err == plumbing.ErrObjectNotFound {
return nil, ErrDirectoryNotFound
}
return tree, err
}
// TreeEntryFile returns the *File for a given *TreeEntry.
func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
blob, err := GetBlob(t.s, e.Hash)
if err != nil {
return nil, err
}
return NewFile(e.Name, e.Mode, blob), nil
}
// FindEntry searches for a TreeEntry in this tree or any of its subtrees.
func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
if t.t == nil {
t.t = make(map[string]*Tree)
}
pathParts := strings.Split(path, "/")
startingTree := t
pathCurrent := ""
// search for the longest path in the tree path cache
for i := len(pathParts); i > 1; i-- {
path := filepath.Join(pathParts[:i]...)
tree, ok := t.t[path]
if ok {
startingTree = tree
pathParts = pathParts[i:]
pathCurrent = path
break
}
}
var tree *Tree
var err error
for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
if tree, err = tree.dir(pathParts[0]); err != nil {
return nil, err
}
pathCurrent = filepath.Join(pathCurrent, pathParts[0])
t.t[pathCurrent] = tree
}
return tree.entry(pathParts[0])
}
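// Usage sketch (illustrative, not part of the original file): look up a
// nested entry by slash-separated path and report whether it is a
// sub-tree or a regular entry.
func findEntrySketch(t *Tree, p string) error {
	e, err := t.FindEntry(p)
	if err != nil {
		return err
	}
	if e.Mode == filemode.Dir {
		fmt.Printf("%s is a directory\n", p)
		return nil
	}
	fmt.Printf("%s is an entry with mode %o\n", p, e.Mode)
	return nil
}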
func (t *Tree) dir(baseName string) (*Tree, error) {
entry, err := t.entry(baseName)
if err != nil {
return nil, ErrDirectoryNotFound
}
obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash)
if err != nil {
return nil, err
}
tree := &Tree{s: t.s}
err = tree.Decode(obj)
return tree, err
}
func (t *Tree) entry(baseName string) (*TreeEntry, error) {
if t.m == nil {
t.buildMap()
}
entry, ok := t.m[baseName]
if !ok {
return nil, ErrEntryNotFound
}
return entry, nil
}
// Files returns a FileIter that iterates over the files contained in the Tree.
func (t *Tree) Files() *FileIter {
return NewFileIter(t.s, t)
}
// ID returns the object ID of the tree. The returned value will always match
// the current value of Tree.Hash.
//
// ID is present to fulfill the Object interface.
func (t *Tree) ID() plumbing.Hash {
return t.Hash
}
// Type returns the type of object. It always returns plumbing.TreeObject.
func (t *Tree) Type() plumbing.ObjectType {
return plumbing.TreeObject
}
// Decode transforms a plumbing.EncodedObject into a Tree struct.
func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.TreeObject {
return ErrUnsupportedObject
}
t.Hash = o.Hash()
if o.Size() == 0 {
return nil
}
t.Entries = nil
t.m = nil
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
for {
str, err := r.ReadString(' ')
if err != nil {
if err == io.EOF {
break
}
return err
}
str = str[:len(str)-1] // strip last byte (' ')
mode, err := filemode.New(str)
if err != nil {
return err
}
name, err := r.ReadString(0)
if err != nil && err != io.EOF {
return err
}
var hash plumbing.Hash
if _, err = io.ReadFull(r, hash[:]); err != nil {
return err
}
baseName := name[:len(name)-1]
t.Entries = append(t.Entries, TreeEntry{
Hash: hash,
Mode: mode,
Name: baseName,
})
}
return nil
}
// Encode transforms a Tree into a plumbing.EncodedObject.
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.TreeObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
for _, entry := range t.Entries {
if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
return err
}
if _, err = w.Write([]byte{0x00}); err != nil {
return err
}
if _, err = w.Write([]byte(entry.Hash[:])); err != nil {
return err
}
}
return err
}
func (t *Tree) buildMap() {
t.m = make(map[string]*TreeEntry)
for i := 0; i < len(t.Entries); i++ {
t.m[t.Entries[i].Name] = &t.Entries[i]
}
}
// Diff returns a list of changes between this tree and the provided one.
func (from *Tree) Diff(to *Tree) (Changes, error) {
return DiffTree(from, to)
}
// DiffContext returns a list of changes between this tree and the provided one.
// An error will be returned if the context expires.
// The provided context must be non-nil.
func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
return DiffTreeContext(ctx, from, to)
}
// Patch returns a Patch with all the changes between this tree and the
// provided one. This representation can be used to create several diff outputs.
func (from *Tree) Patch(to *Tree) (*Patch, error) {
return from.PatchContext(context.Background(), to)
}
// PatchContext returns a Patch with all the changes between this tree and the
// provided one. This representation can be used to create several diff outputs.
// If the context expires, an error will be returned.
// The provided context must be non-nil.
func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
changes, err := DiffTreeContext(ctx, from, to)
if err != nil {
return nil, err
}
return changes.PatchContext(ctx)
}
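// Usage sketch (illustrative, not part of the original file): print the
// paths touched between two trees, e.g. the trees of two consecutive
// commits. For additions or deletions one of the two names is empty.
func diffTreesSketch(from, to *Tree) error {
	changes, err := from.Diff(to)
	if err != nil {
		return err
	}
	for _, c := range changes {
		fmt.Printf("%s -> %s\n", c.From.Name, c.To.Name)
	}
	return nil
}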
// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
type treeEntryIter struct {
t *Tree
pos int
}
func (iter *treeEntryIter) Next() (TreeEntry, error) {
if iter.pos >= len(iter.t.Entries) {
return TreeEntry{}, io.EOF
}
iter.pos++
return iter.t.Entries[iter.pos-1], nil
}
// TreeWalker provides a means of walking through all of the entries in a Tree.
type TreeWalker struct {
stack []*treeEntryIter
base string
recursive bool
seen map[plumbing.Hash]bool
s storer.EncodedObjectStorer
t *Tree
}
// NewTreeWalker returns a new TreeWalker for the given tree.
//
// It is the caller's responsibility to call Close() when finished with the
// tree walker.
func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker {
stack := make([]*treeEntryIter, 0, startingStackSize)
stack = append(stack, &treeEntryIter{t, 0})
return &TreeWalker{
stack: stack,
recursive: recursive,
seen: seen,
s: t.s,
t: t,
}
}
// Next returns the next object from the tree. Objects are returned in order
// and subtrees are included. After the last object has been returned further
// calls to Next() will return io.EOF.
//
// In the current implementation any objects which cannot be found in the
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
var obj Object
for {
current := len(w.stack) - 1
if current < 0 {
// Nothing left on the stack so we're finished
err = io.EOF
return
}
if current > maxTreeDepth {
// We're probably following bad data or some self-referencing tree
err = ErrMaxTreeDepth
return
}
entry, err = w.stack[current].Next()
if err == io.EOF {
// Finished with the current tree, move back up to the parent
w.stack = w.stack[:current]
w.base, _ = path.Split(w.base)
w.base = path.Clean(w.base) // Remove trailing slash
continue
}
if err != nil {
return
}
if w.seen[entry.Hash] {
continue
}
if entry.Mode == filemode.Dir {
obj, err = GetTree(w.s, entry.Hash)
}
name = path.Join(w.base, entry.Name)
if err != nil {
err = io.EOF
return
}
break
}
if !w.recursive {
return
}
if t, ok := obj.(*Tree); ok {
w.stack = append(w.stack, &treeEntryIter{t, 0})
w.base = path.Join(w.base, entry.Name)
}
return
}
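// Usage sketch (illustrative, not part of the original file): walk a tree
// recursively and print every entry with its mode, in the same way the
// file iterator consumes this walker.
func walkTreeSketch(t *Tree) error {
	w := NewTreeWalker(t, true, nil)
	defer w.Close()
	for {
		name, entry, err := w.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%o %s\n", entry.Mode, name)
	}
}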
// Tree returns the tree that the tree walker most recently operated on.
func (w *TreeWalker) Tree() *Tree {
current := len(w.stack) - 1
if w.stack[current].pos == 0 {
current--
}
if current < 0 {
return nil
}
return w.stack[current].t
}
// Close releases any resources used by the TreeWalker.
func (w *TreeWalker) Close() {
w.stack = nil
}
// TreeIter provides an iterator for a set of trees.
type TreeIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewTreeIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *TreeIter that iterates over all
// trees contained in the storer.EncodedObjectIter.
//
// Any non-tree object returned by the storer.EncodedObjectIter is skipped.
func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter {
return &TreeIter{iter, s}
}
// Next moves the iterator to the next tree and returns a pointer to it. If
// there are no more trees, it returns io.EOF.
func (iter *TreeIter) Next() (*Tree, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
if obj.Type() != plumbing.TreeObject {
continue
}
return DecodeTree(iter.s, obj)
}
}
// ForEach calls the cb function for each tree contained in this iter until
// an error occurs or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
if obj.Type() != plumbing.TreeObject {
return nil
}
t, err := DecodeTree(iter.s, obj)
if err != nil {
return err
}
return cb(t)
})
}

View File

@ -0,0 +1,136 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// A treenoder is a helper type that wraps git trees into merkletrie
// noders.
//
// As a merkletrie noder doesn't understand the concept of modes (e.g.
// file permissions), the treenoder includes the mode of the git tree in
// the hash, so changes in the modes will be detected as modifications
// to the file contents by the merkletrie difftree algorithm. This is
// consistent with how the "git diff-tree" command works.
type treeNoder struct {
parent *Tree // the root node is its own parent
name string // empty string for the root node
mode filemode.FileMode
hash plumbing.Hash
children []noder.Noder // memoized
}
// NewTreeRootNode returns the root node of a Tree
func NewTreeRootNode(t *Tree) noder.Noder {
if t == nil {
return &treeNoder{}
}
return &treeNoder{
parent: t,
name: "",
mode: filemode.Dir,
hash: t.Hash,
}
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}
func (t *treeNoder) String() string {
return "treeNoder <" + t.name + ">"
}
func (t *treeNoder) Hash() []byte {
if t.mode == filemode.Deprecated {
return append(t.hash[:], filemode.Regular.Bytes()...)
}
return append(t.hash[:], t.mode.Bytes()...)
}
func (t *treeNoder) Name() string {
return t.name
}
func (t *treeNoder) IsDir() bool {
return t.mode == filemode.Dir
}
// Children will return the children of a treenoder as treenoders,
// building them from the children of the wrapped git tree.
func (t *treeNoder) Children() ([]noder.Noder, error) {
if t.mode != filemode.Dir {
return noder.NoChildren, nil
}
// children are memoized for efficiency
if t.children != nil {
return t.children, nil
}
// The parent of the returned children will be ourselves as a tree if
// we are not the root treenoder. The root is special, as it
// is its own parent.
parent := t.parent
if !t.isRoot() {
var err error
if parent, err = t.parent.Tree(t.name); err != nil {
return nil, err
}
}
return transformChildren(parent)
}
// transformChildren returns the children of a tree as treenoders.
// Efficiency is key here.
func transformChildren(t *Tree) ([]noder.Noder, error) {
var err error
var e TreeEntry
// There will be more tree entries than children in the tree
// (submodules and empty directories are skipped), but it is still
// worth pre-allocating the whole slice now, even if it sometimes
// ends up bigger than needed.
ret := make([]noder.Noder, 0, len(t.Entries))
walker := NewTreeWalker(t, false, nil) // don't recurse
// don't defer walker.Close() for efficiency reasons.
for {
_, e, err = walker.Next()
if err == io.EOF {
break
}
if err != nil {
walker.Close()
return nil, err
}
ret = append(ret, &treeNoder{
parent: t,
name: e.Name,
mode: e.Mode,
hash: e.Hash,
})
}
walker.Close()
return ret, nil
}
// len(t.parent.Entries) is not necessarily the number of elements the
// tree walker visits (empty directories, submodules, etc. are skipped),
// so we have to walk the children here.
func (t *treeNoder) NumChildren() (int, error) {
children, err := t.Children()
if err != nil {
return 0, err
}
return len(children), nil
}
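// Usage sketch (illustrative, not part of the original file): wrap a git
// tree as a merkletrie noder, as the difftree machinery does, and count
// its direct children.
func countRootChildrenSketch(t *Tree) (int, error) {
	root := NewTreeRootNode(t)
	return root.NumChildren()
}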