feat(dir2config): defaults

Mikaël Cluseau
2019-02-28 19:27:09 +11:00
parent d2b212ae6b
commit ea6fce68e1
383 changed files with 74236 additions and 41 deletions

View File

@ -0,0 +1,98 @@
package cache
import (
"container/list"
"sync"
)
// BufferLRU implements an object cache with an LRU eviction policy and a
// maximum size (measured in object size).
type BufferLRU struct {
MaxSize FileSize
actualSize FileSize
ll *list.List
cache map[int64]*list.Element
mut sync.Mutex
}
// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
// size will never be exceeded.
func NewBufferLRU(maxSize FileSize) *BufferLRU {
return &BufferLRU{MaxSize: maxSize}
}
// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
func NewBufferLRUDefault() *BufferLRU {
return &BufferLRU{MaxSize: DefaultMaxSize}
}
type buffer struct {
Key int64
Slice []byte
}
// Put puts a buffer into the cache. If the buffer is already in the cache, it
// will be marked as used. Otherwise, it will be inserted. Buffers might
// be evicted to make room for the new one.
func (c *BufferLRU) Put(key int64, slice []byte) {
c.mut.Lock()
defer c.mut.Unlock()
if c.cache == nil {
c.actualSize = 0
c.cache = make(map[int64]*list.Element, 1000)
c.ll = list.New()
}
bufSize := FileSize(len(slice))
if ee, ok := c.cache[key]; ok {
oldBuf := ee.Value.(buffer)
// in this case bufSize is a delta: new size - old size
bufSize -= FileSize(len(oldBuf.Slice))
c.ll.MoveToFront(ee)
ee.Value = buffer{key, slice}
} else {
if bufSize > c.MaxSize {
return
}
ee := c.ll.PushFront(buffer{key, slice})
c.cache[key] = ee
}
c.actualSize += bufSize
for c.actualSize > c.MaxSize {
last := c.ll.Back()
lastObj := last.Value.(buffer)
lastSize := FileSize(len(lastObj.Slice))
c.ll.Remove(last)
delete(c.cache, lastObj.Key)
c.actualSize -= lastSize
}
}
// Get returns a buffer by its key. It marks the buffer as used. If the buffer
// is not in the cache, (nil, false) will be returned.
func (c *BufferLRU) Get(key int64) ([]byte, bool) {
c.mut.Lock()
defer c.mut.Unlock()
ee, ok := c.cache[key]
if !ok {
return nil, false
}
c.ll.MoveToFront(ee)
return ee.Value.(buffer).Slice, true
}
// Clear the content of this buffer cache.
func (c *BufferLRU) Clear() {
c.mut.Lock()
defer c.mut.Unlock()
c.ll = nil
c.cache = nil
c.actualSize = 0
}
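
A minimal usage sketch of the BufferLRU above (illustrative, not part of this commit; assumes the vendored import path):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/cache"
)

func main() {
	// A 1 KiB budget: eviction is driven by total buffer size, not entry count.
	c := cache.NewBufferLRU(1 * cache.KiByte)
	c.Put(1, make([]byte, 512))
	c.Put(2, make([]byte, 512))
	c.Put(3, make([]byte, 512)) // exceeds the budget; key 1 (least recently used) is evicted
	if _, ok := c.Get(1); !ok {
		fmt.Println("key 1 was evicted")
	}
}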

View File

@ -0,0 +1,39 @@
package cache
import "gopkg.in/src-d/go-git.v4/plumbing"
const (
Byte FileSize = 1 << (iota * 10)
KiByte
MiByte
GiByte
)
type FileSize int64
const DefaultMaxSize FileSize = 96 * MiByte
// Object is an interface to an object cache.
type Object interface {
// Put puts the given object into the cache. Whether this object will
// actually be put into the cache or not is implementation specific.
Put(o plumbing.EncodedObject)
// Get gets an object from the cache given its hash. The second return value
// is true if the object was returned, and false otherwise.
Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
// Clear clears every object from the cache.
Clear()
}
// Buffer is an interface to a buffer cache.
type Buffer interface {
// Put puts a buffer into the cache. If the buffer is already in the cache,
// it will be marked as used. Otherwise, it will be inserted. Buffers might
// be evicted to make room for the new one.
Put(key int64, slice []byte)
// Get returns a buffer by its key. It marks the buffer as used. If the
// buffer is not in the cache, (nil, false) will be returned.
Get(key int64) ([]byte, bool)
// Clear clears every object from the cache.
Clear()
}
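
As a quick compile-time sketch (not part of this commit), the LRU types in this vendor drop satisfy these interfaces:

package main

import "gopkg.in/src-d/go-git.v4/plumbing/cache"

// Compile-time assertions that the LRU implementations satisfy
// the cache interfaces declared above.
var (
	_ cache.Object = (*cache.ObjectLRU)(nil)
	_ cache.Buffer = (*cache.BufferLRU)(nil)
)

func main() {}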

View File

@ -0,0 +1,96 @@
package cache
import (
"container/list"
"sync"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// ObjectLRU implements an object cache with an LRU eviction policy and a
// maximum size (measured in object size).
type ObjectLRU struct {
MaxSize FileSize
actualSize FileSize
ll *list.List
cache map[interface{}]*list.Element
mut sync.Mutex
}
// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
// size will never be exceeded.
func NewObjectLRU(maxSize FileSize) *ObjectLRU {
return &ObjectLRU{MaxSize: maxSize}
}
// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
func NewObjectLRUDefault() *ObjectLRU {
return &ObjectLRU{MaxSize: DefaultMaxSize}
}
// Put puts an object into the cache. If the object is already in the cache, it
// will be marked as used. Otherwise, it will be inserted. Objects might
// be evicted to make room for the new one.
func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
c.mut.Lock()
defer c.mut.Unlock()
if c.cache == nil {
c.actualSize = 0
c.cache = make(map[interface{}]*list.Element, 1000)
c.ll = list.New()
}
objSize := FileSize(obj.Size())
key := obj.Hash()
if ee, ok := c.cache[key]; ok {
oldObj := ee.Value.(plumbing.EncodedObject)
// in this case objSize is a delta: new size - old size
objSize -= FileSize(oldObj.Size())
c.ll.MoveToFront(ee)
ee.Value = obj
} else {
if objSize > c.MaxSize {
return
}
ee := c.ll.PushFront(obj)
c.cache[key] = ee
}
c.actualSize += objSize
for c.actualSize > c.MaxSize {
last := c.ll.Back()
lastObj := last.Value.(plumbing.EncodedObject)
lastSize := FileSize(lastObj.Size())
c.ll.Remove(last)
delete(c.cache, lastObj.Hash())
c.actualSize -= lastSize
}
}
// Get returns an object by its hash. It marks the object as used. If the object
// is not in the cache, (nil, false) will be returned.
func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) {
c.mut.Lock()
defer c.mut.Unlock()
ee, ok := c.cache[k]
if !ok {
return nil, false
}
c.ll.MoveToFront(ee)
return ee.Value.(plumbing.EncodedObject), true
}
// Clear the content of this object cache.
func (c *ObjectLRU) Clear() {
c.mut.Lock()
defer c.mut.Unlock()
c.ll = nil
c.cache = nil
c.actualSize = 0
}
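
A usage sketch for ObjectLRU (illustrative; plumbing.MemoryObject is part of go-git v4, though not of this hunk):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
)

func main() {
	c := cache.NewObjectLRUDefault() // 96 MiB budget (DefaultMaxSize)

	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write([]byte("hello"))

	c.Put(obj)
	if cached, ok := c.Get(obj.Hash()); ok {
		fmt.Println("cached:", cached.Hash())
	}
}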

vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go
View File

@ -0,0 +1,35 @@
package plumbing
import "fmt"
type PermanentError struct {
Err error
}
func NewPermanentError(err error) *PermanentError {
if err == nil {
return nil
}
return &PermanentError{Err: err}
}
func (e *PermanentError) Error() string {
return fmt.Sprintf("permanent client error: %s", e.Err.Error())
}
type UnexpectedError struct {
Err error
}
func NewUnexpectedError(err error) *UnexpectedError {
if err == nil {
return nil
}
return &UnexpectedError{Err: err}
}
func (e *UnexpectedError) Error() string {
return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
}
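
A sketch of how these error wrappers behave (illustrative only):

package main

import (
	"errors"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// Wrapping a low-level failure with a client-facing classification.
	err := plumbing.NewPermanentError(errors.New("repository not found"))
	fmt.Println(err) // permanent client error: repository not found

	// Both constructors return nil for a nil error, so wrapping is
	// safe even when there is nothing to wrap.
	fmt.Println(plumbing.NewUnexpectedError(nil) == nil) // true
}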

View File

@ -0,0 +1,188 @@
package filemode
import (
"encoding/binary"
"fmt"
"os"
"strconv"
)
// A FileMode represents the kind of tree entries used by git. It
// resembles regular file system modes, although FileModes are
// considerably simpler (there are not so many), and there are some,
// like Submodule, that have no file system equivalent.
type FileMode uint32
const (
// Empty is used as the FileMode of tree elements when comparing
// trees in the following situations:
//
// - the mode of tree elements before their creation.
// - the mode of tree elements after their deletion.
// - the mode of unmerged elements when checking the index.
//
// Empty has no file system equivalent. As Empty is the zero value
// of FileMode, it is also returned by New and NewFromOSFileMode
// along with an error, when they fail.
Empty FileMode = 0
// Dir represents a directory.
Dir FileMode = 0040000
// Regular represents non-executable files. Please note this is not
// the same as Go's regular files, which include executable files.
Regular FileMode = 0100644
// Deprecated represents non-executable files with the group-writable
// bit set. This mode was supported by the first versions of git,
// but it has since been deprecated. This library uses it
// internally, so you can read old packfiles, but treats such files as
// Regular when interfacing with the outside world. This is the
// standard git behaviour.
Deprecated FileMode = 0100664
// Executable represents executable files.
Executable FileMode = 0100755
// Symlink represents symbolic links to files.
Symlink FileMode = 0120000
// Submodule represents git submodules. This mode has no file system
// equivalent.
Submodule FileMode = 0160000
)
// New takes the octal string representation of a FileMode and returns
// the FileMode and a nil error. If the string cannot be parsed as a
// 32-bit unsigned octal number, it returns Empty and the parsing error.
//
// Example: "40000" means Dir, "100644" means Regular.
//
// Please note this function does not check if the returned FileMode
// is valid in git or if it is malformed. For instance, "1" will
// return the malformed FileMode(1) and a nil error.
func New(s string) (FileMode, error) {
n, err := strconv.ParseUint(s, 8, 32)
if err != nil {
return Empty, err
}
return FileMode(n), nil
}
// NewFromOSFileMode returns the FileMode used by git to represent
// the provided file system modes and a nil error on success. If the
// file system mode cannot be mapped to any valid git mode (as with
// sockets or named pipes), it will return Empty and an error.
//
// Note that some git modes, like Deprecated and Submodule, cannot be
// generated from os.FileModes; Empty, along with an error, is
// returned only when the method fails.
func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
if m.IsRegular() {
if isSetTemporary(m) {
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
if isSetCharDevice(m) {
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
if isSetUserExecutable(m) {
return Executable, nil
}
return Regular, nil
}
if m.IsDir() {
return Dir, nil
}
if isSetSymLink(m) {
return Symlink, nil
}
return Empty, fmt.Errorf("no equivalent git mode for %s", m)
}
func isSetCharDevice(m os.FileMode) bool {
return m&os.ModeCharDevice != 0
}
func isSetTemporary(m os.FileMode) bool {
return m&os.ModeTemporary != 0
}
func isSetUserExecutable(m os.FileMode) bool {
return m&0100 != 0
}
func isSetSymLink(m os.FileMode) bool {
return m&os.ModeSymlink != 0
}
// Bytes returns a slice of 4 bytes with the mode in little-endian
// encoding.
func (m FileMode) Bytes() []byte {
ret := make([]byte, 4)
binary.LittleEndian.PutUint32(ret, uint32(m))
return ret
}
// IsMalformed reports whether the FileMode should not appear in a git
// packfile, that is: Empty and any other mode not mentioned as a
// constant in this package.
func (m FileMode) IsMalformed() bool {
return m != Dir &&
m != Regular &&
m != Deprecated &&
m != Executable &&
m != Symlink &&
m != Submodule
}
// String returns the FileMode as a string in the standard git format,
// that is, an octal number padded with zeros to 7 digits. Malformed
// modes are printed in that same format, for easier debugging.
//
// Example: Regular is "0100644", Empty is "0000000".
func (m FileMode) String() string {
return fmt.Sprintf("%07o", uint32(m))
}
// IsRegular reports whether the FileMode represents a regular file,
// that is, either Regular or Deprecated. Please note that Executable
// files are not regular, even though in the UNIX tradition they
// usually are: see the IsFile method.
func (m FileMode) IsRegular() bool {
return m == Regular ||
m == Deprecated
}
// IsFile reports whether the FileMode represents a file, that is,
// Regular, Deprecated, Executable or Symlink.
func (m FileMode) IsFile() bool {
return m == Regular ||
m == Deprecated ||
m == Executable ||
m == Symlink
}
// ToOSFileMode returns the os.FileMode to be used when creating file
// system elements with the given git mode and a nil error on success.
//
// When the provided mode cannot be mapped to a valid file system mode
// (e.g. Submodule) it returns os.FileMode(0) and an error.
//
// The returned file mode does not take into account the umask.
func (m FileMode) ToOSFileMode() (os.FileMode, error) {
switch m {
case Dir:
return os.ModePerm | os.ModeDir, nil
case Submodule:
return os.ModePerm | os.ModeDir, nil
case Regular:
return os.FileMode(0644), nil
// Deprecated is no longer allowed: treated as a Regular instead
case Deprecated:
return os.FileMode(0644), nil
case Executable:
return os.FileMode(0755), nil
case Symlink:
return os.ModePerm | os.ModeSymlink, nil
}
return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
}
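
A short sketch of the round trip between octal strings, FileMode and os.FileMode (illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)

func main() {
	// Parse the octal form used in git tree entries.
	m, err := filemode.New("100644")
	if err != nil {
		panic(err)
	}
	fmt.Println(m, m.IsFile(), m.IsRegular()) // 0100644 true true

	// Map back to an os.FileMode for checkout-like operations.
	osMode, err := m.ToOSFileMode()
	fmt.Println(osMode, err) // -rw-r--r-- <nil>
}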

View File

@ -0,0 +1,99 @@
package config
// New creates a new config instance.
func New() *Config {
return &Config{}
}
// Config contains all the sections, comments and includes from a config file.
type Config struct {
Comment *Comment
Sections Sections
Includes Includes
}
// Includes is a list of Includes in a config file.
type Includes []*Include
// Include is a reference to an included config file.
type Include struct {
Path string
Config *Config
}
// Comment is a comment string, stored without the leading '#' or ';'.
type Comment string
const (
// NoSubsection token is passed to Config.AddOption and Config.SetOption to
// represent the absence of a subsection.
NoSubsection = ""
)
// Section returns an existing section with the given name or creates a new one.
func (c *Config) Section(name string) *Section {
for i := len(c.Sections) - 1; i >= 0; i-- {
s := c.Sections[i]
if s.IsName(name) {
return s
}
}
s := &Section{Name: name}
c.Sections = append(c.Sections, s)
return s
}
// AddOption adds an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
if subsection == "" {
c.Section(section).AddOption(key, value)
} else {
c.Section(section).Subsection(subsection).AddOption(key, value)
}
return c
}
// SetOption sets an option to a given section and subsection. Use the
// NoSubsection constant for the subsection argument if no subsection is wanted.
func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
if subsection == "" {
c.Section(section).SetOption(key, value)
} else {
c.Section(section).Subsection(subsection).SetOption(key, value)
}
return c
}
// RemoveSection removes a section from a config file.
func (c *Config) RemoveSection(name string) *Config {
result := Sections{}
for _, s := range c.Sections {
if !s.IsName(name) {
result = append(result, s)
}
}
c.Sections = result
return c
}
// RemoveSubsection removes a subsection from a config file.
func (c *Config) RemoveSubsection(section string, subsection string) *Config {
for _, s := range c.Sections {
if s.IsName(section) {
result := Subsections{}
for _, ss := range s.Subsections {
if !ss.IsName(subsection) {
result = append(result, ss)
}
}
s.Subsections = result
}
}
return c
}
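
A sketch of the fluent API above (illustrative; assumes the vendored import path):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

func main() {
	cfg := config.New().
		SetOption("core", config.NoSubsection, "filemode", "false").
		AddOption("remote", "origin", "url", "https://example.com/repo.git")

	fmt.Println(cfg.Section("core").Option("filemode"))                   // false
	fmt.Println(cfg.Section("remote").Subsection("origin").Option("url")) // https://example.com/repo.git
}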

View File

@ -0,0 +1,37 @@
package config
import (
"io"
"github.com/src-d/gcfg"
)
// A Decoder reads and decodes config files from an input stream.
type Decoder struct {
io.Reader
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r}
}
// Decode reads the whole config from its input and stores it in the
// value pointed to by config.
func (d *Decoder) Decode(config *Config) error {
cb := func(s string, ss string, k string, v string, bv bool) error {
if ss == "" && k == "" {
config.Section(s)
return nil
}
if ss != "" && k == "" {
config.Section(s).Subsection(ss)
return nil
}
config.AddOption(s, ss, k, v)
return nil
}
return gcfg.ReadWithCallback(d, cb)
}
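
Decoding a config held in memory might look like this (sketch):

package main

import (
	"fmt"
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

func main() {
	raw := "[core]\n\tfilemode = false\n[remote \"origin\"]\n\turl = https://example.com/repo.git\n"

	cfg := config.New()
	if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("remote").Subsection("origin").Option("url"))
}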

View File

@ -0,0 +1,122 @@
// Package config implements encoding and decoding of git config files.
//
// Configuration File
// ------------------
//
// The Git configuration file contains a number of variables that affect
// the Git commands' behavior. The `.git/config` file in each repository
// is used to store the configuration for that repository, and
// `$HOME/.gitconfig` is used to store a per-user configuration as
// fallback values for the `.git/config` file. The file `/etc/gitconfig`
// can be used to store a system-wide default configuration.
//
// The configuration variables are used by both the Git plumbing
// and the porcelains. The variables are divided into sections, wherein
// the fully qualified variable name of the variable itself is the last
// dot-separated segment and the section name is everything before the last
// dot. The variable names are case-insensitive, allow only alphanumeric
// characters and `-`, and must start with an alphabetic character. Some
// variables may appear multiple times; we say then that the variable is
// multivalued.
//
// Syntax
// ~~~~~~
//
// The syntax is fairly flexible and permissive; whitespace is mostly
// ignored. The '#' and ';' characters begin comments that run to the
// end of the line, and blank lines are ignored.
//
// The file consists of sections and variables. A section begins with
// the name of the section in square brackets and continues until the next
// section begins. Section names are case-insensitive. Only alphanumeric
// characters, `-` and `.` are allowed in section names. Each variable
// must belong to some section, which means that there must be a section
// header before the first setting of a variable.
//
// Sections can be further divided into subsections. To begin a subsection
// put its name in double quotes, separated by space from the section name,
// in the section header, like in the example below:
//
// --------
// [section "subsection"]
//
// --------
//
// Subsection names are case sensitive and can contain any characters except
// newline (doublequote `"` and backslash can be included by escaping them
// as `\"` and `\\`, respectively). Section headers cannot span multiple
// lines. Variables may belong directly to a section or to a given subsection.
// You can have `[section]` if you have `[section "subsection"]`, but you
// don't need to.
//
// There is also a deprecated `[section.subsection]` syntax. With this
// syntax, the subsection name is converted to lower-case and is also
// compared case sensitively. These subsection names follow the same
// restrictions as section names.
//
// All the other lines (and the remainder of the line after the section
// header) are recognized as setting variables, in the form
// 'name = value' (or just 'name', which is a short-hand to say that
// the variable is the boolean "true").
// The variable names are case-insensitive, allow only alphanumeric characters
// and `-`, and must start with an alphabetic character.
//
// A line that defines a value can be continued to the next line by
// ending it with a `\`; the backslash and the end-of-line are
// stripped. Leading whitespaces after 'name =', the remainder of the
// line after the first comment character '#' or ';', and trailing
// whitespaces of the line are discarded unless they are enclosed in
// double quotes. Internal whitespaces within the value are retained
// verbatim.
//
// Inside double quotes, double quote `"` and backslash `\` characters
// must be escaped: use `\"` for `"` and `\\` for `\`.
//
// The following escape sequences (beside `\"` and `\\`) are recognized:
// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
// and `\b` for backspace (BS). Other char escape sequences (including octal
// escape sequences) are invalid.
//
// Includes
// ~~~~~~~~
//
// You can include one config file from another by setting the special
// `include.path` variable to the name of the file to be included. The
// variable takes a pathname as its value, and is subject to tilde
// expansion.
//
// The included file is expanded immediately, as if its contents had been
// found at the location of the include directive. If the value of the
// `include.path` variable is a relative path, the path is considered to be
// relative to the configuration file in which the include directive was
// found. See below for examples.
//
//
// Example
// ~~~~~~~
//
// # Core variables
// [core]
// ; Don't trust file modes
// filemode = false
//
// # Our diff algorithm
// [diff]
// external = /usr/local/bin/diff-wrapper
// renames = true
//
// [branch "devel"]
// remote = origin
// merge = refs/heads/devel
//
// # Proxy settings
// [core]
// gitProxy="ssh" for "kernel.org"
// gitProxy=default-proxy ; for the rest
//
// [include]
// path = /path/to/foo.inc ; include by absolute path
// path = foo ; expand "foo" relative to the current file
// path = ~/foo ; expand "foo" in your `$HOME` directory
//
package config

View File

@ -0,0 +1,77 @@
package config
import (
"fmt"
"io"
"strings"
)
// An Encoder writes config files to an output stream.
type Encoder struct {
w io.Writer
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
}
// Encode writes the config in git config format to the stream of the encoder.
func (e *Encoder) Encode(cfg *Config) error {
for _, s := range cfg.Sections {
if err := e.encodeSection(s); err != nil {
return err
}
}
return nil
}
func (e *Encoder) encodeSection(s *Section) error {
if len(s.Options) > 0 {
if err := e.printf("[%s]\n", s.Name); err != nil {
return err
}
if err := e.encodeOptions(s.Options); err != nil {
return err
}
}
for _, ss := range s.Subsections {
if err := e.encodeSubsection(s.Name, ss); err != nil {
return err
}
}
return nil
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
return err
}
return e.encodeOptions(s.Options)
}
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
return err
}
}
return nil
}
func (e *Encoder) printf(msg string, args ...interface{}) error {
_, err := fmt.Fprintf(e.w, msg, args...)
return err
}
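
And the reverse direction, writing a Config back out (sketch):

package main

import (
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

func main() {
	cfg := config.New()
	cfg.AddOption("core", config.NoSubsection, "filemode", "false")

	// Prints:
	// [core]
	// 	filemode = false
	if err := config.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
}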

View File

@ -0,0 +1,117 @@
package config
import (
"fmt"
"strings"
)
// Option defines a key/value entity in a config file.
type Option struct {
// Key, preserving its original case.
// Use IsKey to compare keys regardless of case.
Key string
// Original value as string; it may not be normalized.
Value string
}
type Options []*Option
// IsKey returns true if the given key matches
// this option's key in a case-insensitive comparison.
func (o *Option) IsKey(key string) bool {
return strings.ToLower(o.Key) == strings.ToLower(key)
}
func (opts Options) GoString() string {
var strs []string
for _, opt := range opts {
strs = append(strs, fmt.Sprintf("%#v", opt))
}
return strings.Join(strs, ", ")
}
// Get gets the value for the given key if set,
// otherwise it returns the empty string.
//
// Note that there is no difference between an
// unset key and a key set to the empty string.
//
// This matches git behaviour since git v1.8.1-rc1:
// if there are multiple definitions of a key, the
// last one wins.
//
// See: http://article.gmane.org/gmane.linux.kernel/1407184
//
// In order to get all possible values for the same key,
// use GetAll.
func (opts Options) Get(key string) string {
for i := len(opts) - 1; i >= 0; i-- {
o := opts[i]
if o.IsKey(key) {
return o.Value
}
}
return ""
}
// GetAll returns all possible values for the same key.
func (opts Options) GetAll(key string) []string {
result := []string{}
for _, o := range opts {
if o.IsKey(key) {
result = append(result, o.Value)
}
}
return result
}
func (opts Options) withoutOption(key string) Options {
result := Options{}
for _, o := range opts {
if !o.IsKey(key) {
result = append(result, o)
}
}
return result
}
func (opts Options) withAddedOption(key string, value string) Options {
return append(opts, &Option{key, value})
}
func (opts Options) withSettedOption(key string, values ...string) Options {
var result Options
var added []string
for _, o := range opts {
if !o.IsKey(key) {
result = append(result, o)
continue
}
if contains(values, o.Value) {
added = append(added, o.Value)
result = append(result, o)
continue
}
}
for _, value := range values {
if contains(added, value) {
continue
}
result = result.withAddedOption(key, value)
}
return result
}
func contains(haystack []string, needle string) bool {
for _, s := range haystack {
if s == needle {
return true
}
}
return false
}
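
The last-one-wins semantics of Get versus GetAll, as a sketch:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

func main() {
	s := config.New().Section("remote").Subsection("origin")
	s.AddOption("fetch", "+refs/heads/*:refs/remotes/origin/*")
	s.AddOption("fetch", "+refs/tags/*:refs/tags/*")

	// Get returns the last definition; GetAll preserves every value.
	fmt.Println(s.Option("fetch"))         // +refs/tags/*:refs/tags/*
	fmt.Println(s.Options.GetAll("fetch")) // both values, in order
}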

View File

@ -0,0 +1,146 @@
package config
import (
"fmt"
"strings"
)
// Section is the representation of a section inside git configuration files.
// Each Section contains Options that are used by both the Git plumbing
// and the porcelains.
// Sections can be further divided into subsections. To begin a subsection
// put its name in double quotes, separated by space from the section name,
// in the section header, like in the example below:
//
// [section "subsection"]
//
// All the other lines (and the remainder of the line after the section header)
// are recognized as option variables, in the form "name = value" (or just name,
// which is a short-hand to say that the variable is the boolean "true").
// The variable names are case-insensitive, allow only alphanumeric characters
// and -, and must start with an alphabetic character:
//
// [section "subsection1"]
// option1 = value1
// option2
// [section "subsection2"]
// option3 = value2
//
type Section struct {
Name string
Options Options
Subsections Subsections
}
type Subsection struct {
Name string
Options Options
}
type Sections []*Section
func (s Sections) GoString() string {
var strs []string
for _, ss := range s {
strs = append(strs, fmt.Sprintf("%#v", ss))
}
return strings.Join(strs, ", ")
}
type Subsections []*Subsection
func (s Subsections) GoString() string {
var strs []string
for _, ss := range s {
strs = append(strs, fmt.Sprintf("%#v", ss))
}
return strings.Join(strs, ", ")
}
// IsName checks whether the provided name equals the Section name, case-insensitively.
func (s *Section) IsName(name string) bool {
return strings.ToLower(s.Name) == strings.ToLower(name)
}
// Option returns the value for the specified key. An empty string is
// returned if the key does not exist.
func (s *Section) Option(key string) string {
return s.Options.Get(key)
}
// AddOption adds a new Option to the Section. The updated Section is returned.
func (s *Section) AddOption(key string, value string) *Section {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Section. If the option already exists, it is replaced.
// The updated Section is returned.
func (s *Section) SetOption(key string, value string) *Section {
s.Options = s.Options.withSettedOption(key, value)
return s
}
// RemoveOption removes the option with the specified key. The updated Section is returned.
func (s *Section) RemoveOption(key string) *Section {
s.Options = s.Options.withoutOption(key)
return s
}
// Subsection returns a Subsection from the specified Section. If the
// Subsection does not exist, a new one is created and added to the Section.
func (s *Section) Subsection(name string) *Subsection {
for i := len(s.Subsections) - 1; i >= 0; i-- {
ss := s.Subsections[i]
if ss.IsName(name) {
return ss
}
}
ss := &Subsection{Name: name}
s.Subsections = append(s.Subsections, ss)
return ss
}
// HasSubsection checks if the Section has a Subsection with the specified name.
func (s *Section) HasSubsection(name string) bool {
for _, ss := range s.Subsections {
if ss.IsName(name) {
return true
}
}
return false
}
// IsName checks if the name of the subsection is exactly the specified name.
func (s *Subsection) IsName(name string) bool {
return s.Name == name
}
// Option returns the option with the specified key. If the option does not
// exist, an empty string is returned.
func (s *Subsection) Option(key string) string {
return s.Options.Get(key)
}
// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
func (s *Subsection) AddOption(key string, value string) *Subsection {
s.Options = s.Options.withAddedOption(key, value)
return s
}
// SetOption adds a new Option to the Subsection. If the option already exists, it is replaced.
// The updated Subsection is returned.
func (s *Subsection) SetOption(key string, value ...string) *Subsection {
s.Options = s.Options.withSettedOption(key, value...)
return s
}
// RemoveOption removes the option with the specified key. The updated Subsection is returned.
func (s *Subsection) RemoveOption(key string) *Subsection {
s.Options = s.Options.withoutOption(key)
return s
}

View File

@ -0,0 +1,58 @@
package diff
import (
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)
// Operation defines the operation of a diff item.
type Operation int
const (
// Equal item represents an unchanged diff item.
Equal Operation = iota
// Add item represents an insert diff.
Add
// Delete item represents a delete diff.
Delete
)
// Patch represents a collection of steps to transform several files.
type Patch interface {
// FilePatches returns a slice of patches per file.
FilePatches() []FilePatch
// Message returns an optional message that can be at the top of the
// Patch representation.
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
// File contains all the file metadata necessary to print some patch formats.
type File interface {
// Hash returns the File Hash.
Hash() plumbing.Hash
// Mode returns the FileMode.
Mode() filemode.FileMode
// Path returns the complete Path to the file, including the filename.
Path() string
}
// Chunk represents a portion of a file transformation to another.
type Chunk interface {
// Content contains the portion of the file.
Content() string
// Type contains the Operation to do with this Chunk.
Type() Operation
}

View File

@ -0,0 +1,360 @@
package diff
import (
"bytes"
"fmt"
"io"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
)
const (
diffInit = "diff --git a/%s b/%s\n"
chunkStart = "@@ -"
chunkMiddle = " +"
chunkEnd = " @@%s\n"
chunkCount = "%d,%d"
noFilePath = "/dev/null"
aDir = "a/"
bDir = "b/"
fPath = "--- %s\n"
tPath = "+++ %s\n"
binary = "Binary files %s and %s differ\n"
addLine = "+%s\n"
deleteLine = "-%s\n"
equalLine = " %s\n"
oldMode = "old mode %o\n"
newMode = "new mode %o\n"
deletedFileMode = "deleted file mode %o\n"
newFileMode = "new file mode %o\n"
renameFrom = "from"
renameTo = "to"
renameFileMode = "rename %s %s\n"
indexAndMode = "index %s..%s %o\n"
indexNoMode = "index %s..%s\n"
DefaultContextLines = 3
)
// UnifiedEncoder encodes a unified diff into the provided Writer.
// There are some unsupported features:
// - Similarity index for renames
// - Short hash representation
type UnifiedEncoder struct {
io.Writer
// ctxLines is the count of unchanged lines that will appear
// surrounding a change.
ctxLines int
buf bytes.Buffer
}
func NewUnifiedEncoder(w io.Writer, ctxLines int) *UnifiedEncoder {
return &UnifiedEncoder{ctxLines: ctxLines, Writer: w}
}
func (e *UnifiedEncoder) Encode(patch Patch) error {
e.printMessage(patch.Message())
if err := e.encodeFilePatch(patch.FilePatches()); err != nil {
return err
}
_, err := e.buf.WriteTo(e)
return err
}
func (e *UnifiedEncoder) encodeFilePatch(filePatches []FilePatch) error {
for _, p := range filePatches {
f, t := p.Files()
if err := e.header(f, t, p.IsBinary()); err != nil {
return err
}
g := newHunksGenerator(p.Chunks(), e.ctxLines)
for _, c := range g.Generate() {
c.WriteTo(&e.buf)
}
}
return nil
}
func (e *UnifiedEncoder) printMessage(message string) {
isEmpty := message == ""
hasSuffix := strings.HasSuffix(message, "\n")
if !isEmpty && !hasSuffix {
message = message + "\n"
}
e.buf.WriteString(message)
}
func (e *UnifiedEncoder) header(from, to File, isBinary bool) error {
switch {
case from == nil && to == nil:
return nil
case from != nil && to != nil:
hashEquals := from.Hash() == to.Hash()
fmt.Fprintf(&e.buf, diffInit, from.Path(), to.Path())
if from.Mode() != to.Mode() {
fmt.Fprintf(&e.buf, oldMode+newMode, from.Mode(), to.Mode())
}
if from.Path() != to.Path() {
fmt.Fprintf(&e.buf,
renameFileMode+renameFileMode,
renameFrom, from.Path(), renameTo, to.Path())
}
if from.Mode() != to.Mode() && !hashEquals {
fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), to.Hash())
} else if !hashEquals {
fmt.Fprintf(&e.buf, indexAndMode, from.Hash(), to.Hash(), from.Mode())
}
if !hashEquals {
e.pathLines(isBinary, aDir+from.Path(), bDir+to.Path())
}
case from == nil:
fmt.Fprintf(&e.buf, diffInit, to.Path(), to.Path())
fmt.Fprintf(&e.buf, newFileMode, to.Mode())
fmt.Fprintf(&e.buf, indexNoMode, plumbing.ZeroHash, to.Hash())
e.pathLines(isBinary, noFilePath, bDir+to.Path())
case to == nil:
fmt.Fprintf(&e.buf, diffInit, from.Path(), from.Path())
fmt.Fprintf(&e.buf, deletedFileMode, from.Mode())
fmt.Fprintf(&e.buf, indexNoMode, from.Hash(), plumbing.ZeroHash)
e.pathLines(isBinary, aDir+from.Path(), noFilePath)
}
return nil
}
func (e *UnifiedEncoder) pathLines(isBinary bool, fromPath, toPath string) {
format := fPath + tPath
if isBinary {
format = binary
}
fmt.Fprintf(&e.buf, format, fromPath, toPath)
}
type hunksGenerator struct {
fromLine, toLine int
ctxLines int
chunks []Chunk
current *hunk
hunks []*hunk
beforeContext, afterContext []string
}
func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator {
return &hunksGenerator{
chunks: chunks,
ctxLines: ctxLines,
}
}
func (c *hunksGenerator) Generate() []*hunk {
for i, chunk := range c.chunks {
ls := splitLines(chunk.Content())
lsLen := len(ls)
switch chunk.Type() {
case Equal:
c.fromLine += lsLen
c.toLine += lsLen
c.processEqualsLines(ls, i)
case Delete:
if lsLen != 0 {
c.fromLine++
}
c.processHunk(i, chunk.Type())
c.fromLine += lsLen - 1
c.current.AddOp(chunk.Type(), ls...)
case Add:
if lsLen != 0 {
c.toLine++
}
c.processHunk(i, chunk.Type())
c.toLine += lsLen - 1
c.current.AddOp(chunk.Type(), ls...)
}
if i == len(c.chunks)-1 && c.current != nil {
c.hunks = append(c.hunks, c.current)
}
}
return c.hunks
}
func (c *hunksGenerator) processHunk(i int, op Operation) {
if c.current != nil {
return
}
var ctxPrefix string
linesBefore := len(c.beforeContext)
if linesBefore > c.ctxLines {
ctxPrefix = " " + c.beforeContext[linesBefore-c.ctxLines-1]
c.beforeContext = c.beforeContext[linesBefore-c.ctxLines:]
linesBefore = c.ctxLines
}
c.current = &hunk{ctxPrefix: ctxPrefix}
c.current.AddOp(Equal, c.beforeContext...)
switch op {
case Delete:
c.current.fromLine, c.current.toLine =
c.addLineNumbers(c.fromLine, c.toLine, linesBefore, i, Add)
case Add:
c.current.toLine, c.current.fromLine =
c.addLineNumbers(c.toLine, c.fromLine, linesBefore, i, Delete)
}
c.beforeContext = nil
}
// addLineNumbers obtains the line numbers in a new chunk
func (c *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
cla = la - linesBefore
// we need to search for a reference for the next diff
switch {
case linesBefore != 0 && c.ctxLines != 0:
if lb > c.ctxLines {
clb = lb - c.ctxLines + 1
} else {
clb = 1
}
case c.ctxLines == 0:
clb = lb
case i != len(c.chunks)-1:
next := c.chunks[i+1]
if next.Type() == op || next.Type() == Equal {
// this diff will be into this chunk
clb = lb + 1
}
}
return
}
func (c *hunksGenerator) processEqualsLines(ls []string, i int) {
if c.current == nil {
c.beforeContext = append(c.beforeContext, ls...)
return
}
c.afterContext = append(c.afterContext, ls...)
if len(c.afterContext) <= c.ctxLines*2 && i != len(c.chunks)-1 {
c.current.AddOp(Equal, c.afterContext...)
c.afterContext = nil
} else {
ctxLines := c.ctxLines
if ctxLines > len(c.afterContext) {
ctxLines = len(c.afterContext)
}
c.current.AddOp(Equal, c.afterContext[:ctxLines]...)
c.hunks = append(c.hunks, c.current)
c.current = nil
c.beforeContext = c.afterContext[ctxLines:]
c.afterContext = nil
}
}
func splitLines(s string) []string {
out := strings.Split(s, "\n")
if out[len(out)-1] == "" {
out = out[:len(out)-1]
}
return out
}
type hunk struct {
fromLine int
toLine int
fromCount int
toCount int
ctxPrefix string
ops []*op
}
func (c *hunk) WriteTo(buf *bytes.Buffer) {
buf.WriteString(chunkStart)
if c.fromCount == 1 {
fmt.Fprintf(buf, "%d", c.fromLine)
} else {
fmt.Fprintf(buf, chunkCount, c.fromLine, c.fromCount)
}
buf.WriteString(chunkMiddle)
if c.toCount == 1 {
fmt.Fprintf(buf, "%d", c.toLine)
} else {
fmt.Fprintf(buf, chunkCount, c.toLine, c.toCount)
}
fmt.Fprintf(buf, chunkEnd, c.ctxPrefix)
for _, d := range c.ops {
buf.WriteString(d.String())
}
}
func (c *hunk) AddOp(t Operation, s ...string) {
ls := len(s)
switch t {
case Add:
c.toCount += ls
case Delete:
c.fromCount += ls
case Equal:
c.toCount += ls
c.fromCount += ls
}
for _, l := range s {
c.ops = append(c.ops, &op{l, t})
}
}
type op struct {
text string
t Operation
}
func (o *op) String() string {
var prefix string
switch o.t {
case Add:
prefix = addLine
case Delete:
prefix = deleteLine
case Equal:
prefix = equalLine
}
return fmt.Sprintf(prefix, o.text)
}
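
A self-contained sketch that drives the UnifiedEncoder with a hand-built Patch. The chunk, file, filePatch and patch types here are hypothetical, written only to satisfy the interfaces from the previous file:

package main

import (
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
	"gopkg.in/src-d/go-git.v4/plumbing/format/diff"
)

// Minimal, hypothetical implementations of the diff interfaces.
type chunk struct {
	content string
	op      diff.Operation
}

func (c chunk) Content() string      { return c.content }
func (c chunk) Type() diff.Operation { return c.op }

type file struct {
	hash plumbing.Hash
	path string
}

func (f file) Hash() plumbing.Hash     { return f.hash }
func (f file) Mode() filemode.FileMode { return filemode.Regular }
func (f file) Path() string            { return f.path }

type filePatch struct {
	from, to file
	chunks   []diff.Chunk
}

func (fp filePatch) IsBinary() bool                { return false }
func (fp filePatch) Files() (diff.File, diff.File) { return fp.from, fp.to }
func (fp filePatch) Chunks() []diff.Chunk          { return fp.chunks }

type patch struct{ fps []diff.FilePatch }

func (p patch) FilePatches() []diff.FilePatch { return p.fps }
func (p patch) Message() string               { return "" }

func main() {
	from := file{plumbing.ComputeHash(plumbing.BlobObject, []byte("hello\n")), "greeting.txt"}
	to := file{plumbing.ComputeHash(plumbing.BlobObject, []byte("hi\n")), "greeting.txt"}

	p := patch{[]diff.FilePatch{filePatch{
		from: from,
		to:   to,
		chunks: []diff.Chunk{
			chunk{"hello\n", diff.Delete},
			chunk{"hi\n", diff.Add},
		},
	}}}

	// Prints a diff --git header, an index line, ---/+++ paths and
	// a single "@@ -1 +1 @@" hunk with -hello / +hi.
	e := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines)
	if err := e.Encode(p); err != nil {
		panic(err)
	}
}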

View File

@ -0,0 +1,136 @@
package gitignore
import (
"bytes"
"io/ioutil"
"os"
"os/user"
"strings"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/plumbing/format/config"
gioutil "gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
commentPrefix = "#"
coreSection = "core"
eol = "\n"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
)
// readIgnoreFile reads a specific git ignore file.
func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
if err == nil {
defer f.Close()
if data, err := ioutil.ReadAll(f); err == nil {
for _, s := range strings.Split(string(data), eol) {
if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
ps = append(ps, ParsePattern(s, path))
}
}
}
} else if !os.IsNotExist(err) {
return nil, err
}
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
if err != nil {
return
}
for _, fi := range fis {
if fi.IsDir() && fi.Name() != gitDir {
var subps []Pattern
subps, err = ReadPatterns(fs, append(path, fi.Name()))
if err != nil {
return
}
if len(subps) > 0 {
ps = append(ps, subps...)
}
}
}
return
}
func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
f, err := fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
defer gioutil.CheckClose(f, &err)
b, err := ioutil.ReadAll(f)
if err != nil {
return
}
d := config.NewDecoder(bytes.NewBuffer(b))
raw := config.New()
if err = d.Decode(raw); err != nil {
return
}
s := raw.Section(coreSection)
efo := s.Options.Get(excludesfile)
if efo == "" {
return nil, nil
}
ps, err = readIgnoreFile(fs, nil, efo)
if os.IsNotExist(err) {
return nil, nil
}
return
}
// LoadGlobalPatterns loads gitignore patterns from the gitignore file
// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
// exist the function will return nil. If the core.excludesfile property
// is not declared, the function will return nil. If the file pointed to by
// the core.excludesfile property does not exist, the function will return nil.
//
// The function assumes fs is rooted at the root filesystem.
func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
usr, err := user.Current()
if err != nil {
return
}
return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile))
}
// LoadSystemPatterns loads gitignore patterns from the gitignore file
// declared in the system's /etc/gitconfig file. If the /etc/gitconfig file does
// not exist the function will return nil. If the core.excludesfile property
// is not declared, the function will return nil. If the file pointed to by
// the core.excludesfile property does not exist, the function will return nil.
//
// The function assumes fs is rooted at the root filesystem.
func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
return loadPatterns(fs, systemFile)
}
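
A sketch of reading patterns from an in-memory filesystem and matching against them (assumes go-billy's memfs and util helpers are available alongside this vendor drop):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/memfs"
	"gopkg.in/src-d/go-billy.v4/util"
	"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
)

func main() {
	fs := memfs.New()
	util.WriteFile(fs, ".gitignore", []byte("*.log\n"), 0644)

	ps, err := gitignore.ReadPatterns(fs, nil)
	if err != nil {
		panic(err)
	}

	m := gitignore.NewMatcher(ps)
	fmt.Println(m.Match([]string{"debug.log"}, false)) // true: excluded
	fmt.Println(m.Match([]string{"main.go"}, false))   // false
}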

View File

@ -0,0 +1,70 @@
// Package gitignore implements matching file system paths to gitignore patterns that
// can be automatically read from a git repository tree in the order of definition
// priorities. It supports all pattern formats as specified in the original gitignore
// documentation, copied below:
//
// Pattern format
// ==============
//
// - A blank line matches no files, so it can serve as a separator for readability.
//
// - A line starting with # serves as a comment. Put a backslash ("\") in front of
// the first hash for patterns that begin with a hash.
//
// - Trailing spaces are ignored unless they are quoted with backslash ("\").
//
// - An optional prefix "!" which negates the pattern; any matching file excluded
// by a previous pattern will become included again. It is not possible to
// re-include a file if a parent directory of that file is excluded.
// Git doesn't list excluded directories for performance reasons, so
// any patterns on contained files have no effect, no matter where they are
// defined. Put a backslash ("\") in front of the first "!" for patterns
// that begin with a literal "!", for example, "\!important!.txt".
//
// - If the pattern ends with a slash, it is removed for the purpose of the
// following description, but it would only find a match with a directory.
// In other words, foo/ will match a directory foo and paths underneath it,
// but will not match a regular file or a symbolic link foo (this is consistent
// with the way how pathspec works in general in Git).
//
// - If the pattern does not contain a slash /, Git treats it as a shell glob
// pattern and checks for a match against the pathname relative to the location
// of the .gitignore file (relative to the toplevel of the work tree if not
// from a .gitignore file).
//
// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
// not match a / in the pathname. For example, "Documentation/*.html" matches
// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
// "tools/perf/Documentation/perf.html".
//
// - A leading slash matches the beginning of the pathname. For example,
// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
//
// Two consecutive asterisks ("**") in patterns matched against full pathname
// may have special meaning:
//
// - A leading "**" followed by a slash means match in all directories.
// For example, "**/foo" matches file or directory "foo" anywhere, the same as
// pattern "foo". "**/foo/bar" matches file or directory "bar"
// anywhere that is directly under directory "foo".
//
// - A trailing "/**" matches everything inside. For example, "abc/**" matches
// all files inside directory "abc", relative to the location of the
// .gitignore file, with infinite depth.
//
// - A slash followed by two consecutive asterisks then a slash matches
// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
// "a/x/y/b" and so on.
//
// - Other consecutive asterisks are considered invalid.
//
// Copyright and license
// =====================
//
// Copyright (c) Oleg Sklyar, Silvertern and source{d}
//
// The package code was donated to source{d} to include, modify and develop
// further as a part of the `go-git` project, release it on the license of
// the whole project or delete it from the project.
package gitignore

View File

@ -0,0 +1,30 @@
package gitignore
// Matcher defines a global multi-pattern matcher for gitignore patterns
type Matcher interface {
// Match matches patterns in the order of priorities. As soon as an inclusion or
// exclusion is found, no further matching is performed.
Match(path []string, isDir bool) bool
}
// NewMatcher constructs a new global matcher. Patterns must be given in the order
// of increasing priority. That is: the most generic settings files first, then the
// contents of the repo's .gitignore, then the contents of .gitignore files down the
// path of the repo, and then the patterns given on the command line.
func NewMatcher(ps []Pattern) Matcher {
return &matcher{ps}
}
type matcher struct {
patterns []Pattern
}
func (m *matcher) Match(path []string, isDir bool) bool {
n := len(m.patterns)
for i := n - 1; i >= 0; i-- {
if match := m.patterns[i].Match(path, isDir); match > NoMatch {
return match == Exclude
}
}
return false
}

View File

@ -0,0 +1,153 @@
package gitignore
import (
"path/filepath"
"strings"
)
// MatchResult defines outcomes of a match, no match, exclusion or inclusion.
type MatchResult int
const (
// NoMatch defines the no match outcome of a match check
NoMatch MatchResult = iota
// Exclude defines an exclusion of a file as a result of a match check
Exclude
// Include defines an explicit inclusion of a file as a result of a match check
Include
)
const (
inclusionPrefix = "!"
zeroToManyDirs = "**"
patternDirSep = "/"
)
// Pattern defines a single gitignore pattern.
type Pattern interface {
// Match matches the given path to the pattern.
Match(path []string, isDir bool) MatchResult
}
type pattern struct {
domain []string
pattern []string
inclusion bool
dirOnly bool
isGlob bool
}
// ParsePattern parses a gitignore pattern string into the Pattern structure.
func ParsePattern(p string, domain []string) Pattern {
res := pattern{domain: domain}
if strings.HasPrefix(p, inclusionPrefix) {
res.inclusion = true
p = p[1:]
}
if !strings.HasSuffix(p, "\\ ") {
p = strings.TrimRight(p, " ")
}
if strings.HasSuffix(p, patternDirSep) {
res.dirOnly = true
p = p[:len(p)-1]
}
if strings.Contains(p, patternDirSep) {
res.isGlob = true
}
res.pattern = strings.Split(p, patternDirSep)
return &res
}
func (p *pattern) Match(path []string, isDir bool) MatchResult {
if len(path) <= len(p.domain) {
return NoMatch
}
for i, e := range p.domain {
if path[i] != e {
return NoMatch
}
}
path = path[len(p.domain):]
if p.isGlob && !p.globMatch(path, isDir) {
return NoMatch
} else if !p.isGlob && !p.simpleNameMatch(path, isDir) {
return NoMatch
}
if p.inclusion {
return Include
} else {
return Exclude
}
}
func (p *pattern) simpleNameMatch(path []string, isDir bool) bool {
for i, name := range path {
if match, err := filepath.Match(p.pattern[0], name); err != nil {
return false
} else if !match {
continue
}
if p.dirOnly && !isDir && i == len(path)-1 {
return false
}
return true
}
return false
}
func (p *pattern) globMatch(path []string, isDir bool) bool {
matched := false
canTraverse := false
for i, pattern := range p.pattern {
if pattern == "" {
canTraverse = false
continue
}
if pattern == zeroToManyDirs {
if i == len(p.pattern)-1 {
break
}
canTraverse = true
continue
}
if strings.Contains(pattern, zeroToManyDirs) {
return false
}
if len(path) == 0 {
return false
}
if canTraverse {
canTraverse = false
for len(path) > 0 {
e := path[0]
path = path[1:]
if match, err := filepath.Match(pattern, e); err != nil {
return false
} else if match {
matched = true
break
} else if len(path) == 0 {
// if nothing left then fail
matched = false
}
}
} else {
if match, err := filepath.Match(pattern, path[0]); err != nil || !match {
return false
}
matched = true
path = path[1:]
}
}
if matched && p.dirOnly && !isDir && len(path) == 0 {
matched = false
}
return matched
}
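
A sketch of the domain and dir-only semantics of ParsePattern (Match returns 0/1/2 for NoMatch/Exclude/Include):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/gitignore"
)

func main() {
	// A "build/" pattern as if read from vendor/.gitignore: the domain
	// restricts it to paths below "vendor", and the trailing slash makes
	// it match directories only.
	p := gitignore.ParsePattern("build/", []string{"vendor"})

	fmt.Println(p.Match([]string{"vendor", "build"}, true))  // 1 (Exclude)
	fmt.Println(p.Match([]string{"build"}, true))            // 0 (NoMatch): outside the domain
	fmt.Println(p.Match([]string{"vendor", "build"}, false)) // 0 (NoMatch): dir-only pattern
}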

View File

@ -0,0 +1,181 @@
package idxfile
import (
"bufio"
"bytes"
"errors"
"io"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
)
const (
fanout = 256
objectIDLength = 20
)
// Decoder reads and decodes idx files from an input stream.
type Decoder struct {
*bufio.Reader
}
// NewDecoder builds a new idx stream decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{bufio.NewReader(r)}
}
// Decode reads from the stream and decodes the content into the MemoryIndex struct.
func (d *Decoder) Decode(idx *MemoryIndex) error {
if err := validateHeader(d); err != nil {
return err
}
flow := []func(*MemoryIndex, io.Reader) error{
readVersion,
readFanout,
readObjectNames,
readCRC32,
readOffsets,
readChecksums,
}
for _, f := range flow {
if err := f(idx, d); err != nil {
return err
}
}
return nil
}
func validateHeader(r io.Reader) error {
var h = make([]byte, 4)
if _, err := io.ReadFull(r, h); err != nil {
return err
}
if !bytes.Equal(h, idxHeader) {
return ErrMalformedIdxFile
}
return nil
}
func readVersion(idx *MemoryIndex, r io.Reader) error {
v, err := binary.ReadUint32(r)
if err != nil {
return err
}
if v > VersionSupported {
return ErrUnsupportedVersion
}
idx.Version = v
return nil
}
func readFanout(idx *MemoryIndex, r io.Reader) error {
for k := 0; k < fanout; k++ {
n, err := binary.ReadUint32(r)
if err != nil {
return err
}
idx.Fanout[k] = n
idx.FanoutMapping[k] = noMapping
}
return nil
}
func readObjectNames(idx *MemoryIndex, r io.Reader) error {
for k := 0; k < fanout; k++ {
var buckets uint32
if k == 0 {
buckets = idx.Fanout[k]
} else {
// Fanout entries are cumulative; a decreasing entry means a corrupt
// file. (buckets is unsigned, so a plain `buckets < 0` check could
// never fire.)
if idx.Fanout[k] < idx.Fanout[k-1] {
return ErrMalformedIdxFile
}
buckets = idx.Fanout[k] - idx.Fanout[k-1]
}
if buckets == 0 {
continue
}
idx.FanoutMapping[k] = len(idx.Names)
nameLen := int(buckets * objectIDLength)
bin := make([]byte, nameLen)
if _, err := io.ReadFull(r, bin); err != nil {
return err
}
idx.Names = append(idx.Names, bin)
idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4))
idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4))
}
return nil
}
func readCRC32(idx *MemoryIndex, r io.Reader) error {
for k := 0; k < fanout; k++ {
if pos := idx.FanoutMapping[k]; pos != noMapping {
if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil {
return err
}
}
}
return nil
}
func readOffsets(idx *MemoryIndex, r io.Reader) error {
var o64cnt int
for k := 0; k < fanout; k++ {
if pos := idx.FanoutMapping[k]; pos != noMapping {
if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {
return err
}
for p := 0; p < len(idx.Offset32[pos]); p += 4 {
if idx.Offset32[pos][p]&(byte(1)<<7) > 0 {
o64cnt++
}
}
}
}
if o64cnt > 0 {
idx.Offset64 = make([]byte, o64cnt*8)
if _, err := io.ReadFull(r, idx.Offset64); err != nil {
return err
}
}
return nil
}
func readChecksums(idx *MemoryIndex, r io.Reader) error {
if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
return err
}
if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil {
return err
}
return nil
}
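
Decoding an existing index might look like this (sketch; the filename is hypothetical):

package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	// Hypothetical filename; any version-2 pack index works.
	f, err := os.Open("pack-example.idx")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	count, _ := idx.Count()
	fmt.Println("objects indexed:", count)
}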

View File

@ -0,0 +1,128 @@
// Package idxfile implements encoding and decoding of packfile idx files.
//
// == Original (version 1) pack-*.idx files have the following format:
//
// - The header consists of 256 4-byte network byte order
// integers. N-th entry of this table records the number of
// objects in the corresponding pack, the first byte of whose
// object name is less than or equal to N. This is called the
// 'first-level fan-out' table.
//
// - The header is followed by sorted 24-byte entries, one entry
// per object in the pack. Each entry is:
//
// 4-byte network byte order integer, recording where the
// object is stored in the packfile as the offset from the
// beginning.
//
// 20-byte object name.
//
// - The file is concluded with a trailer:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
//
// Pack Idx file:
//
// -- +--------------------------------+
// fanout | fanout[0] = 2 (for example) |-.
// table +--------------------------------+ |
// | fanout[1] | |
// +--------------------------------+ |
// | fanout[2] | |
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | fanout[255] = total objects |---.
// -- +--------------------------------+ | |
// main | offset | | |
// index | object name 00XXXXXXXXXXXXXXXX | | |
// tab +--------------------------------+ | |
// | offset | | |
// | object name 00XXXXXXXXXXXXXXXX | | |
// +--------------------------------+<+ |
// .-| offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | +--------------------------------+ |
// | | offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | | offset | |
// | | object name FFXXXXXXXXXXXXXXXX | |
// --| +--------------------------------+<--+
// trailer | | packfile checksum |
// | +--------------------------------+
// | | idxfile checksum |
// | +--------------------------------+
// .---------.
// |
// Pack file entry: <+
//
// packed object header:
// 1-byte size extension bit (MSB)
// type (next 3 bit)
// size0 (lower 4-bit)
// n-byte sizeN (as long as MSB is set, each 7-bit)
// size0..sizeN form 4+7+7+..+7 bit integer, size0
// is the least significant part, and sizeN is the
// most significant part.
// packed object data:
// If it is not DELTA, then deflated bytes (the size above
// is the size before compression).
// If it is REF_DELTA, then
// 20-byte base object name SHA1 (the size above is the
// size of the delta data that follows).
// delta data, deflated.
// If it is OFS_DELTA, then
// n-byte offset (see below) interpreted as a negative
// offset from the type-byte of the header of the
// ofs-delta entry (the size above is the size of
// the delta data that follows).
// delta data, deflated.
//
// offset encoding:
// n bytes with MSB set in all but the last one.
// The offset is then the number constructed by
// concatenating the lower 7 bit of each byte, and
// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
// to the result.
//
// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
// have some other reorganizations. They have the format:
//
// - A 4-byte magic number '\377tOc' which is an unreasonable
// fanout[0] value.
//
// - A 4-byte version number (= 2)
//
// - A 256-entry fan-out table just like v1.
//
// - A table of sorted 20-byte SHA1 object names. These are
// packed together without offset values to reduce the cache
// footprint of the binary search for a specific object name.
//
// - A table of 4-byte CRC32 values of the packed object data.
// This is new in v2 so compressed data can be copied directly
// from pack to pack during repacking without undetected
// data corruption.
//
// - A table of 4-byte offset values (in network byte order).
// These are usually 31-bit pack file offsets, but large
// offsets are encoded as an index into the next table with
// the msbit set.
//
// - A table of 8-byte offset entries (empty for pack files less
// than 2 GiB). Pack files are organized with heavily used
// objects toward the front, so most object references should
// not need to refer to this table.
//
// - The same trailer as a v1 pack file:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
package idxfile
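
The OFS_DELTA offset encoding described above, as a small decoding sketch (a hypothetical helper, not part of this diff):

package main

import "fmt"

// decodeOfsDeltaOffset implements the offset encoding described above:
// the lower 7 bits of each byte are concatenated, and for every byte
// after the first, 1 is added before shifting, which amounts to adding
// 2^7 + 2^14 + ... + 2^(7*(n-1)) to the result.
func decodeOfsDeltaOffset(bs []byte) int64 {
	var offset int64
	for i, b := range bs {
		if i > 0 {
			offset++
		}
		offset = (offset << 7) | int64(b&0x7f)
	}
	return offset
}

func main() {
	// One byte: MSB clear, the value is just the low 7 bits.
	fmt.Println(decodeOfsDeltaOffset([]byte{0x05})) // 5
	// Two bytes: 2^7 is added on top of the concatenated 14 bits,
	// so the smallest two-byte encoding is 128.
	fmt.Println(decodeOfsDeltaOffset([]byte{0x80, 0x00})) // 128
}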

View File

@ -0,0 +1,142 @@
package idxfile
import (
"crypto/sha1"
"hash"
"io"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
// Encoder writes MemoryIndex structs to an output stream.
type Encoder struct {
io.Writer
hash hash.Hash
}
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode encodes a MemoryIndex to the encoder's writer.
func (e *Encoder) Encode(idx *MemoryIndex) (int, error) {
flow := []func(*MemoryIndex) (int, error){
e.encodeHeader,
e.encodeFanout,
e.encodeHashes,
e.encodeCRC32,
e.encodeOffsets,
e.encodeChecksums,
}
sz := 0
for _, f := range flow {
i, err := f(idx)
sz += i
if err != nil {
return sz, err
}
}
return sz, nil
}
func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) {
c, err := e.Write(idxHeader)
if err != nil {
return c, err
}
return c + 4, binary.WriteUint32(e, idx.Version)
}
func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) {
for _, c := range idx.Fanout {
if err := binary.WriteUint32(e, c); err != nil {
return 0, err
}
}
return fanout * 4, nil
}
func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) {
var size int
for k := 0; k < fanout; k++ {
pos := idx.FanoutMapping[k]
if pos == noMapping {
continue
}
n, err := e.Write(idx.Names[pos])
if err != nil {
return size, err
}
size += n
}
return size, nil
}
func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) {
var size int
for k := 0; k < fanout; k++ {
pos := idx.FanoutMapping[k]
if pos == noMapping {
continue
}
n, err := e.Write(idx.CRC32[pos])
if err != nil {
return size, err
}
size += n
}
return size, nil
}
func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) {
var size int
for k := 0; k < fanout; k++ {
pos := idx.FanoutMapping[k]
if pos == noMapping {
continue
}
n, err := e.Write(idx.Offset32[pos])
if err != nil {
return size, err
}
size += n
}
if len(idx.Offset64) > 0 {
n, err := e.Write(idx.Offset64)
if err != nil {
return size, err
}
size += n
}
return size, nil
}
func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
return 0, err
}
copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20])
if _, err := e.Write(idx.IdxChecksum[:]); err != nil {
return 0, err
}
return 40, nil
}
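// An illustrative usage sketch (assuming a populated *MemoryIndex named idx
// and an io.Writer named w):
//
//	e := idxfile.NewEncoder(w)
//	if _, err := e.Encode(idx); err != nil {
//		// handle error
//	}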

View File

@ -0,0 +1,347 @@
package idxfile
import (
"bytes"
"io"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
const (
// VersionSupported is the only idx version supported.
VersionSupported = 2
noMapping = -1
)
var (
idxHeader = []byte{255, 't', 'O', 'c'}
)
// Index represents an index of a packfile.
type Index interface {
// Contains checks whether the given hash is in the index.
Contains(h plumbing.Hash) (bool, error)
// FindOffset finds the offset in the packfile for the object with
// the given hash.
FindOffset(h plumbing.Hash) (int64, error)
// FindCRC32 finds the CRC32 of the object with the given hash.
FindCRC32(h plumbing.Hash) (uint32, error)
// FindHash finds the hash for the object with the given offset.
FindHash(o int64) (plumbing.Hash, error)
// Count returns the number of entries in the index.
Count() (int64, error)
// Entries returns an iterator to retrieve all index entries.
Entries() (EntryIter, error)
// EntriesByOffset returns an iterator to retrieve all index entries ordered
// by offset.
EntriesByOffset() (EntryIter, error)
}
// MemoryIndex is the in memory representation of an idx file.
type MemoryIndex struct {
Version uint32
Fanout [256]uint32
// FanoutMapping maps the position in the fanout table to the position
// in the Names, Offset32 and CRC32 slices. This improves the memory
// usage by not needing an array with unnecessary empty slots.
FanoutMapping [256]int
Names [][]byte
Offset32 [][]byte
CRC32 [][]byte
Offset64 []byte
PackfileChecksum [20]byte
IdxChecksum [20]byte
offsetHash map[int64]plumbing.Hash
}
var _ Index = (*MemoryIndex)(nil)
// NewMemoryIndex returns an instance of a new MemoryIndex.
func NewMemoryIndex() *MemoryIndex {
return &MemoryIndex{}
}
func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) {
k := idx.FanoutMapping[h[0]]
if k == noMapping {
return 0, false
}
if len(idx.Names) <= k {
return 0, false
}
data := idx.Names[k]
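// Each entry in Offset32[k] is a 4-byte offset, so the number of objects
// in this bucket is len(Offset32[k]) / 4.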
high := uint64(len(idx.Offset32[k])) >> 2
if high == 0 {
return 0, false
}
low := uint64(0)
for {
mid := (low + high) >> 1
offset := mid * objectIDLength
cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength])
if cmp < 0 {
high = mid
} else if cmp == 0 {
return int(mid), true
} else {
low = mid + 1
}
if low >= high {
break
}
}
return 0, false
}
// Contains implements the Index interface.
func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) {
_, ok := idx.findHashIndex(h)
return ok, nil
}
// FindOffset implements the Index interface.
func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
if len(idx.FanoutMapping) <= int(h[0]) {
return 0, plumbing.ErrObjectNotFound
}
k := idx.FanoutMapping[h[0]]
i, ok := idx.findHashIndex(h)
if !ok {
return 0, plumbing.ErrObjectNotFound
}
return idx.getOffset(k, i)
}
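// isO64Mask marks a 32-bit offset entry whose lower 31 bits are not a pack
// offset but an index into the Offset64 table (idx v2 large-offset support).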
const isO64Mask = uint64(1) << 31
func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
offset := secondLevel << 2
buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
ofs, err := binary.ReadUint32(buf)
if err != nil {
return -1, err
}
if (uint64(ofs) & isO64Mask) != 0 {
offset := 8 * (uint64(ofs) & ^isO64Mask)
buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
n, err := binary.ReadUint64(buf)
if err != nil {
return -1, err
}
return int64(n), nil
}
return int64(ofs), nil
}
// FindCRC32 implements the Index interface.
func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
k := idx.FanoutMapping[h[0]]
i, ok := idx.findHashIndex(h)
if !ok {
return 0, plumbing.ErrObjectNotFound
}
return idx.getCRC32(k, i)
}
func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
offset := secondLevel << 2
buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
return binary.ReadUint32(buf)
}
// FindHash implements the Index interface.
func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
// Lazily generate the reverse offset/hash map if required.
if idx.offsetHash == nil {
if err := idx.genOffsetHash(); err != nil {
return plumbing.ZeroHash, err
}
}
hash, ok := idx.offsetHash[o]
if !ok {
return plumbing.ZeroHash, plumbing.ErrObjectNotFound
}
return hash, nil
}
// genOffsetHash generates the offset/hash mapping for reverse search.
func (idx *MemoryIndex) genOffsetHash() error {
count, err := idx.Count()
if err != nil {
return err
}
idx.offsetHash = make(map[int64]plumbing.Hash, count)
iter, err := idx.Entries()
if err != nil {
return err
}
for {
entry, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
idx.offsetHash[int64(entry.Offset)] = entry.Hash
}
}
// Count implements the Index interface.
func (idx *MemoryIndex) Count() (int64, error) {
return int64(idx.Fanout[fanout-1]), nil
}
// Entries implements the Index interface.
func (idx *MemoryIndex) Entries() (EntryIter, error) {
return &idxfileEntryIter{idx, 0, 0, 0}, nil
}
// EntriesByOffset implements the Index interface.
func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
count, err := idx.Count()
if err != nil {
return nil, err
}
iter := &idxfileEntryOffsetIter{
entries: make(entriesByOffset, count),
}
entries, err := idx.Entries()
if err != nil {
return nil, err
}
for pos := 0; int64(pos) < count; pos++ {
entry, err := entries.Next()
if err != nil {
return nil, err
}
iter.entries[pos] = entry
}
sort.Sort(iter.entries)
return iter, nil
}
// EntryIter is an iterator that will return the entries in a packfile index.
type EntryIter interface {
// Next returns the next entry in the packfile index.
Next() (*Entry, error)
// Close closes the iterator.
Close() error
}
type idxfileEntryIter struct {
idx *MemoryIndex
total int
firstLevel, secondLevel int
}
func (i *idxfileEntryIter) Next() (*Entry, error) {
for {
if i.firstLevel >= fanout {
return nil, io.EOF
}
if i.total >= int(i.idx.Fanout[i.firstLevel]) {
i.firstLevel++
i.secondLevel = 0
continue
}
entry := new(Entry)
ofs := i.secondLevel * objectIDLength
copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
pos := i.idx.FanoutMapping[entry.Hash[0]]
offset, err := i.idx.getOffset(pos, i.secondLevel)
if err != nil {
return nil, err
}
entry.Offset = uint64(offset)
entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
if err != nil {
return nil, err
}
i.secondLevel++
i.total++
return entry, nil
}
}
func (i *idxfileEntryIter) Close() error {
i.firstLevel = fanout
return nil
}
// Entry is the in memory representation of an object entry in the idx file.
type Entry struct {
Hash plumbing.Hash
CRC32 uint32
Offset uint64
}
type idxfileEntryOffsetIter struct {
entries entriesByOffset
pos int
}
func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
if i.pos >= len(i.entries) {
return nil, io.EOF
}
entry := i.entries[i.pos]
i.pos++
return entry, nil
}
func (i *idxfileEntryOffsetIter) Close() error {
i.pos = len(i.entries) + 1
return nil
}
type entriesByOffset []*Entry
func (o entriesByOffset) Len() int {
return len(o)
}
func (o entriesByOffset) Less(i int, j int) bool {
return o[i].Offset < o[j].Offset
}
func (o entriesByOffset) Swap(i int, j int) {
o[i], o[j] = o[j], o[i]
}
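// An illustrative lookup sketch (assuming an already populated *MemoryIndex
// named idx and a plumbing.Hash named h):
//
//	offset, err := idx.FindOffset(h)
//	if err != nil {
//		// plumbing.ErrObjectNotFound when h is not in the index
//	}
//	// offset is the object's byte position within the packfile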

View File

@ -0,0 +1,186 @@
package idxfile
import (
"bytes"
"fmt"
"math"
"sort"
"sync"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
// objects implements sort.Interface and uses hash as sorting key.
type objects []Entry
// Writer implements a packfile Observer interface and is used to generate
// indexes.
type Writer struct {
m sync.Mutex
count uint32
checksum plumbing.Hash
objects objects
offset64 uint32
finished bool
index *MemoryIndex
added map[plumbing.Hash]struct{}
}
// Index returns a previously created MemoryIndex or creates a new one if
// needed.
func (w *Writer) Index() (*MemoryIndex, error) {
w.m.Lock()
defer w.m.Unlock()
if w.index == nil {
return w.createIndex()
}
return w.index, nil
}
// Add appends new object data.
func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
w.m.Lock()
defer w.m.Unlock()
if w.added == nil {
w.added = make(map[plumbing.Hash]struct{})
}
if _, ok := w.added[h]; !ok {
w.added[h] = struct{}{}
w.objects = append(w.objects, Entry{h, crc, pos})
}
}
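// Finished reports whether packfile parsing has completed, that is, whether
// OnFooter has been called.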
func (w *Writer) Finished() bool {
return w.finished
}
// OnHeader implements packfile.Observer interface.
func (w *Writer) OnHeader(count uint32) error {
w.count = count
w.objects = make(objects, 0, count)
return nil
}
// OnInflatedObjectHeader implements packfile.Observer interface.
func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
return nil
}
// OnInflatedObjectContent implements packfile.Observer interface.
func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
w.Add(h, uint64(pos), crc)
return nil
}
// OnFooter implements packfile.Observer interface.
func (w *Writer) OnFooter(h plumbing.Hash) error {
w.checksum = h
w.finished = true
_, err := w.createIndex()
if err != nil {
return err
}
return nil
}
// createIndex returns a MemoryIndex filled with the information gathered by
// the observer callbacks.
func (w *Writer) createIndex() (*MemoryIndex, error) {
if !w.finished {
return nil, fmt.Errorf("the index still hasn't finished building")
}
idx := new(MemoryIndex)
w.index = idx
sort.Sort(w.objects)
// unmap all fans by default
for i := range idx.FanoutMapping {
idx.FanoutMapping[i] = noMapping
}
buf := new(bytes.Buffer)
last := -1
bucket := -1
for i, o := range w.objects {
fan := o.Hash[0]
// fill the gaps between fans
for j := last + 1; j < int(fan); j++ {
idx.Fanout[j] = uint32(i)
}
// update the number of objects for this position
idx.Fanout[fan] = uint32(i + 1)
// we move from one bucket to another, update counters and allocate
// memory
if last != int(fan) {
bucket++
idx.FanoutMapping[fan] = bucket
last = int(fan)
idx.Names = append(idx.Names, make([]byte, 0))
idx.Offset32 = append(idx.Offset32, make([]byte, 0))
idx.CRC32 = append(idx.CRC32, make([]byte, 0))
}
idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...)
offset := o.Offset
if offset > math.MaxInt32 {
offset = w.addOffset64(offset)
}
buf.Truncate(0)
binary.WriteUint32(buf, uint32(offset))
idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
buf.Truncate(0)
binary.WriteUint32(buf, uint32(o.CRC32))
idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
}
for j := last + 1; j < 256; j++ {
idx.Fanout[j] = uint32(len(w.objects))
}
idx.Version = VersionSupported
idx.PackfileChecksum = w.checksum
return idx, nil
}
func (w *Writer) addOffset64(pos uint64) uint64 {
buf := new(bytes.Buffer)
binary.WriteUint64(buf, pos)
w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
index := uint64(w.offset64 | (1 << 31))
w.offset64++
return index
}
func (o objects) Len() int {
return len(o)
}
func (o objects) Less(i int, j int) bool {
cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:])
return cmp < 0
}
func (o objects) Swap(i int, j int) {
o[i], o[j] = o[j], o[i]
}
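// An illustrative sketch of producing an index from a packfile stream
// (assuming the packfile package exposes a NewParser constructor that
// accepts Observer values; the exact wiring may differ):
//
//	w := new(idxfile.Writer)
//	p, err := packfile.NewParser(packfile.NewScanner(r), w)
//	if err != nil {
//		// handle error
//	}
//	if _, err := p.Parse(); err != nil {
//		// handle error
//	}
//	idx, err := w.Index() // valid once OnFooter has been observed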

View File

@ -0,0 +1,476 @@
package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
var (
// DecodeVersionSupported is the range of supported index versions
DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
// ErrMalformedSignature is returned by Decode when the index file header is
// malformed
ErrMalformedSignature = errors.New("malformed index signature file")
// ErrInvalidChecksum is returned by Decode if the SHA1 hash does not match
// the read content
ErrInvalidChecksum = errors.New("invalid checksum")
errUnknownExtension = errors.New("unknown extension")
)
const (
entryHeaderLength = 62
entryExtended = 0x4000
entryValid = 0x8000
nameMask = 0xfff
intentToAddMask = 1 << 13
skipWorkTreeMask = 1 << 14
)
// A Decoder reads and decodes index files from an input stream.
type Decoder struct {
r io.Reader
hash hash.Hash
lastEntry *Entry
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
return &Decoder{
r: io.TeeReader(r, h),
hash: h,
}
}
// Decode reads the whole index object from its input and stores it in the
// value pointed to by idx.
func (d *Decoder) Decode(idx *Index) error {
var err error
idx.Version, err = validateHeader(d.r)
if err != nil {
return err
}
entryCount, err := binary.ReadUint32(d.r)
if err != nil {
return err
}
if err := d.readEntries(idx, int(entryCount)); err != nil {
return err
}
return d.readExtensions(idx)
}
func (d *Decoder) readEntries(idx *Index, count int) error {
for i := 0; i < count; i++ {
e, err := d.readEntry(idx)
if err != nil {
return err
}
d.lastEntry = e
idx.Entries = append(idx.Entries, e)
}
return nil
}
func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
e := &Entry{}
var msec, mnsec, sec, nsec uint32
var flags uint16
flow := []interface{}{
&sec, &nsec,
&msec, &mnsec,
&e.Dev,
&e.Inode,
&e.Mode,
&e.UID,
&e.GID,
&e.Size,
&e.Hash,
&flags,
}
if err := binary.Read(d.r, flow...); err != nil {
return nil, err
}
read := entryHeaderLength
if sec != 0 || nsec != 0 {
e.CreatedAt = time.Unix(int64(sec), int64(nsec))
}
if msec != 0 || mnsec != 0 {
e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
}
e.Stage = Stage(flags>>12) & 0x3
if flags&entryExtended != 0 {
extended, err := binary.ReadUint16(d.r)
if err != nil {
return nil, err
}
read += 2
e.IntentToAdd = extended&intentToAddMask != 0
e.SkipWorktree = extended&skipWorkTreeMask != 0
}
if err := d.readEntryName(idx, e, flags); err != nil {
return nil, err
}
return e, d.padEntry(idx, e, read)
}
func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
var name string
var err error
switch idx.Version {
case 2, 3:
len := flags & nameMask
name, err = d.doReadEntryName(len)
case 4:
name, err = d.doReadEntryNameV4()
default:
return ErrUnsupportedVersion
}
if err != nil {
return err
}
e.Name = name
return nil
}
func (d *Decoder) doReadEntryNameV4() (string, error) {
l, err := binary.ReadVariableWidthInt(d.r)
if err != nil {
return "", err
}
var base string
if d.lastEntry != nil {
base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
}
name, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return "", err
}
return base + string(name), nil
}
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
if err := binary.Read(d.r, &name); err != nil {
return "", err
}
return string(name), nil
}
// Index entries are padded out to the next 8 byte alignment
// for historical reasons related to how C Git read the files.
func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
if idx.Version == 4 {
return nil
}
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
return err
}
func (d *Decoder) readExtensions(idx *Index) error {
// TODO: support 'Split index' and 'Untracked cache' extensions, take in
// count that they are not supported by jgit or libgit
var expected []byte
var err error
var header [4]byte
for {
expected = d.hash.Sum(nil)
var n int
if n, err = io.ReadFull(d.r, header[:]); err != nil {
if n == 0 {
err = io.EOF
}
break
}
err = d.readExtension(idx, header[:])
if err != nil {
break
}
}
if err != errUnknownExtension {
return err
}
return d.readChecksum(expected, header)
}
func (d *Decoder) readExtension(idx *Index, header []byte) error {
switch {
case bytes.Equal(header, treeExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.Cache = &Tree{}
d := &treeExtensionDecoder{r}
if err := d.Decode(idx.Cache); err != nil {
return err
}
case bytes.Equal(header, resolveUndoExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.ResolveUndo = &ResolveUndo{}
d := &resolveUndoDecoder{r}
if err := d.Decode(idx.ResolveUndo); err != nil {
return err
}
case bytes.Equal(header, endOfIndexEntryExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.EndOfIndexEntry = &EndOfIndexEntry{}
d := &endOfIndexEntryDecoder{r}
if err := d.Decode(idx.EndOfIndexEntry); err != nil {
return err
}
default:
return errUnknownExtension
}
return nil
}
func (d *Decoder) getExtensionReader() (io.Reader, error) {
len, err := binary.ReadUint32(d.r)
if err != nil {
return nil, err
}
return &io.LimitedReader{R: d.r, N: int64(len)}, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
if err := binary.Read(d.r, h[4:]); err != nil {
return err
}
if !bytes.Equal(h[:], expected) {
return ErrInvalidChecksum
}
return nil
}
func validateHeader(r io.Reader) (version uint32, err error) {
var s = make([]byte, 4)
if _, err := io.ReadFull(r, s); err != nil {
return 0, err
}
if !bytes.Equal(s, indexSignature) {
return 0, ErrMalformedSignature
}
version, err = binary.ReadUint32(r)
if err != nil {
return 0, err
}
if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
return 0, ErrUnsupportedVersion
}
return
}
type treeExtensionDecoder struct {
r io.Reader
}
func (d *treeExtensionDecoder) Decode(t *Tree) error {
for {
e, err := d.readEntry()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if e == nil {
continue
}
t.Entries = append(t.Entries, *e)
}
}
func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
e := &TreeEntry{}
path, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return nil, err
}
e.Path = string(path)
count, err := binary.ReadUntil(d.r, ' ')
if err != nil {
return nil, err
}
i, err := strconv.Atoi(string(count))
if err != nil {
return nil, err
}
// An entry can be in an invalidated state and is represented by having a
// negative number in the entry_count field.
if i == -1 {
return nil, nil
}
e.Entries = i
trees, err := binary.ReadUntil(d.r, '\n')
if err != nil {
return nil, err
}
i, err = strconv.Atoi(string(trees))
if err != nil {
return nil, err
}
e.Trees = i
if err := binary.Read(d.r, &e.Hash); err != nil {
return nil, err
}
return e, nil
}
type resolveUndoDecoder struct {
r io.Reader
}
func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
for {
e, err := d.readEntry()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
ru.Entries = append(ru.Entries, *e)
}
}
func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
e := &ResolveUndoEntry{
Stages: make(map[Stage]plumbing.Hash),
}
path, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return nil, err
}
e.Path = string(path)
for i := 0; i < 3; i++ {
if err := d.readStage(e, Stage(i+1)); err != nil {
return nil, err
}
}
// Object names appear in stage order (1 to 3), so read them in that
// order rather than ranging over the map, whose iteration order is
// randomized.
for _, s := range []Stage{AncestorMode, OurMode, TheirMode} {
if _, ok := e.Stages[s]; !ok {
continue
}
var hash plumbing.Hash
if err := binary.Read(d.r, hash[:]); err != nil {
return nil, err
}
e.Stages[s] = hash
}
return e, nil
}
func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
ascii, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return err
}
stage, err := strconv.ParseInt(string(ascii), 8, 64)
if err != nil {
return err
}
if stage != 0 {
e.Stages[s] = plumbing.ZeroHash
}
return nil
}
type endOfIndexEntryDecoder struct {
r io.Reader
}
func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
var err error
e.Offset, err = binary.ReadUint32(d.r)
if err != nil {
return err
}
return binary.Read(d.r, &e.Hash)
}
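// An illustrative decoding sketch (assuming an io.Reader named r positioned
// at the start of an index file):
//
//	var idx index.Index
//	if err := index.NewDecoder(r).Decode(&idx); err != nil {
//		// ErrMalformedSignature, ErrUnsupportedVersion, ...
//	}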

View File

@ -0,0 +1,360 @@
// Package index implements encoding and decoding of index format files.
//
// Git index format
// ================
//
// == The Git index file has the following format
//
// All binary numbers are in network byte order. Version 2 is described
// here unless stated otherwise.
//
// - A 12-byte header consisting of
//
// 4-byte signature:
// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
//
// 4-byte version number:
// The current supported versions are 2, 3 and 4.
//
// 32-bit number of index entries.
//
// - A number of sorted index entries (see below).
//
// - Extensions
//
// Extensions are identified by signature. Optional extensions can
// be ignored if Git does not understand them.
//
// Git currently supports cached tree and resolve undo extensions.
//
// 4-byte extension signature. If the first byte is 'A'..'Z' the
// extension is optional and can be ignored.
//
// 32-bit size of the extension
//
// Extension data
//
// - 160-bit SHA-1 over the content of the index file before this
// checksum.
//
// == Index entry
//
// Index entries are sorted in ascending order on the name field,
// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
// localization, no special casing of directory separator '/'). Entries
// with the same name are sorted by their stage field.
//
// 32-bit ctime seconds, the last time a file's metadata changed
// this is stat(2) data
//
// 32-bit ctime nanosecond fractions
// this is stat(2) data
//
// 32-bit mtime seconds, the last time a file's data changed
// this is stat(2) data
//
// 32-bit mtime nanosecond fractions
// this is stat(2) data
//
// 32-bit dev
// this is stat(2) data
//
// 32-bit ino
// this is stat(2) data
//
// 32-bit mode, split into (high to low bits)
//
// 4-bit object type
// valid values in binary are 1000 (regular file), 1010 (symbolic link)
// and 1110 (gitlink)
//
// 3-bit unused
//
// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
// Symbolic links and gitlinks have value 0 in this field.
//
// 32-bit uid
// this is stat(2) data
//
// 32-bit gid
// this is stat(2) data
//
// 32-bit file size
// This is the on-disk size from stat(2), truncated to 32-bit.
//
// 160-bit SHA-1 for the represented object
//
// A 16-bit 'flags' field split into (high to low bits)
//
// 1-bit assume-valid flag
//
// 1-bit extended flag (must be zero in version 2)
//
// 2-bit stage (during merge)
//
// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
// is stored in this field.
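//
// For example (illustrative): flags 0x3007 has the assume-valid and
// extended bits clear, stage 3, and a name length of 7.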
//
// (Version 3 or later) A 16-bit field, only applicable if the
// "extended flag" above is 1, split into (high to low bits).
//
// 1-bit reserved for future
//
// 1-bit skip-worktree flag (used by sparse checkout)
//
// 1-bit intent-to-add flag (used by "git add -N")
//
// 13-bit unused, must be zero
//
// Entry path name (variable length) relative to top level directory
// (without leading slash). '/' is used as path separator. The special
// path components ".", ".." and ".git" (without quotes) are disallowed.
// Trailing slash is also disallowed.
//
// The exact encoding is undefined, but the '.' and '/' characters
// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
// byte (iow, this is a UNIX pathname).
//
// (Version 4) In version 4, the entry path name is prefix-compressed
// relative to the path name for the previous entry (the very first
// entry is encoded as if the path name for the previous entry is an
// empty string). At the beginning of an entry, an integer N in the
// variable width encoding (the same encoding as the offset is encoded
// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
// by a NUL-terminated string S. Removing N bytes from the end of the
// path name for the previous entry, and replacing it with the string S
// yields the path name for this entry.
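//
// For example (illustrative): if the path name for the previous entry
// is "src/main.go", then N = 7 and S = "util.go" encode the path
// "src/" + "util.go" = "src/util.go".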
//
// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
// while keeping the name NUL-terminated.
//
// (Version 4) In version 4, the padding after the pathname does not
// exist.
//
// Interpretation of index entries in split index mode is completely
// different. See below for details.
//
// == Extensions
//
// === Cached tree
//
// Cached tree extension contains pre-computed hashes for trees that can
// be derived from the index. It helps speed up tree object generation
// from index for a new commit.
//
// When a path is updated in index, the path must be invalidated and
// removed from tree cache.
//
// The signature for this extension is { 'T', 'R', 'E', 'E' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated path component (relative to its parent directory);
//
// - ASCII decimal number of entries in the index that is covered by the
// tree this entry represents (entry_count);
//
// - A space (ASCII 32);
//
// - ASCII decimal number that represents the number of subtrees this
// tree has;
//
// - A newline (ASCII 10); and
//
// - 160-bit object name for the object that would result from writing
// this span of index as a tree.
//
// An entry can be in an invalidated state and is represented by having
// a negative number in the entry_count field. In this case, there is no
// object name and the next entry starts immediately after the newline.
// When writing an invalid entry, -1 should always be used as entry_count.
//
// The entries are written out in the top-down, depth-first order. The
// first entry represents the root level of the repository, followed by the
// first subtree--let's call this A--of the root level (with its name
// relative to the root level), followed by the first subtree of A (with
// its name relative to A), ...
//
// === Resolve undo
//
// A conflict is represented in the index as a set of higher stage entries.
// When a conflict is resolved (e.g. with "git add path"), these higher
// stage entries will be removed and a stage-0 entry with proper resolution
// is added.
//
// When these higher stage entries are removed, they are saved in the
// resolve undo extension, so that conflicts can be recreated (e.g. with
// "git checkout -m"), in case users want to redo a conflict resolution
// from scratch.
//
// The signature for this extension is { 'R', 'E', 'U', 'C' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated pathname the entry describes (relative to the root of
// the repository, i.e. full pathname);
//
// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
// stage 1 to 3 (a missing stage is represented by "0" in this field);
// and
//
// - At most three 160-bit object names of the entry in stages from 1 to 3
// (nothing is written for a missing stage).
//
// === Split index
//
// In split index mode, the majority of index entries could be stored
// in a separate file. This extension records the changes to be made on
// top of that to produce the final index.
//
// The signature for this extension is { 'l', 'i', 'n', 'k' }.
//
// The extension consists of:
//
// - 160-bit SHA-1 of the shared index file. The shared index file path
// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
// index does not require a shared index file.
//
// - An ewah-encoded delete bitmap, each bit represents an entry in the
// shared index. If a bit is set, its corresponding entry in the
// shared index will be removed from the final index. Note, because
// a delete operation changes index entry positions, but we do need
// original positions in replace phase, it's best to just mark
// entries for removal, then do a mass deletion after replacement.
//
// - An ewah-encoded replace bitmap, each bit represents an entry in
// the shared index. If a bit is set, its corresponding entry in the
// shared index will be replaced with an entry in this index
// file. All replaced entries are stored in sorted order in this
// index. The first "1" bit in the replace bitmap corresponds to the
// first index entry, the second "1" bit to the second entry and so
// on. Replaced entries may have empty path names to save space.
//
// The remaining index entries after replaced ones will be added to the
// final index. These added entries are also sorted by entry name then
// stage.
//
// == Untracked cache
//
// Untracked cache saves the untracked file list and necessary data to
// verify the cache. The signature for this extension is { 'U', 'N',
// 'T', 'R' }.
//
// The extension starts with
//
// - A sequence of NUL-terminated strings, preceded by the size of the
// sequence in variable width encoding. Each string describes the
// environment where the cache can be used.
//
// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
// ctime field until "file size".
//
// - Stat data of core.excludesfile
//
// - 32-bit dir_flags (see struct dir_struct)
//
// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
// does not exist.
//
// - 160-bit SHA-1 of core.excludesfile. Null SHA-1 means the file does
// not exist.
//
// - NUL-terminated string of per-dir exclude file name. This usually
// is ".gitignore".
//
// - The number of following directory blocks, variable width
// encoding. If this number is zero, the extension ends here with a
// following NUL.
//
// - A number of directory blocks in depth-first-search order, each
// consists of
//
// - The number of untracked entries, variable width encoding.
//
// - The number of sub-directory blocks, variable width encoding.
//
// - The directory name terminated by NUL.
//
// - A number of untracked file/dir names terminated by NUL.
//
// The remaining data of each directory block is grouped by type:
//
// - An ewah bitmap, the n-th bit marks whether the n-th directory has
// valid untracked cache entries.
//
// - An ewah bitmap, the n-th bit records "check-only" bit of
// read_directory_recursive() for the n-th directory.
//
// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
// is valid for the n-th directory and exists in the next data.
//
// - An array of stat data. The n-th data corresponds with the n-th
// "one" bit in the previous ewah bitmap.
//
// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
// in the previous ewah bitmap.
//
// - One NUL.
//
// == File System Monitor cache
//
// The file system monitor cache tracks files for which the core.fsmonitor
// hook has told us about changes. The signature for this extension is
// { 'F', 'S', 'M', 'N' }.
//
// The extension starts with
//
// - 32-bit version number: the current supported version is 1.
//
// - 64-bit time: the extension data reflects all changes through the given
// time which is stored as the nanoseconds elapsed since midnight,
// January 1, 1970.
//
// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
//
// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
// is not CE_FSMONITOR_VALID.
//
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
// The signature for this extension is { 'E', 'O', 'I', 'E' }.
//
// The extension consists of:
//
// - 32-bit offset to the end of the index entries
//
// - 160-bit SHA-1 over the extension types and their sizes (but not
// their contents). E.g. if we have "TREE" extension that is N-bytes
// long, "REUC" extension that is M-bytes long, followed by "EOIE",
// then the hash would be:
//
// SHA-1("TREE" + <binary representation of N> +
// "REUC" + <binary representation of M>)
//
// == Index Entry Offset Table
//
// The Index Entry Offset Table (IEOT) is used to help address the CPU
// cost of loading the index by enabling multi-threading the process of
// converting cache entries from the on-disk format to the in-memory format.
// The signature for this extension is { 'I', 'E', 'O', 'T' }.
//
// The extension consists of:
//
// - 32-bit version (currently 1)
//
// - A number of index offset entries each consisting of:
//
// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this block
package index

View File

@ -0,0 +1,150 @@
package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"sort"
"time"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
var (
// EncodeVersionSupported is the only index version supported by the Encoder
EncodeVersionSupported uint32 = 2
// ErrInvalidTimestamp is returned by Encode if the Index contains an Entry
// with negative timestamp values
ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
)
// An Encoder writes an Index to an output stream.
type Encoder struct {
w io.Writer
hash hash.Hash
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
return ErrUnsupportedVersion
}
if err := e.encodeHeader(idx); err != nil {
return err
}
if err := e.encodeEntries(idx); err != nil {
return err
}
return e.encodeFooter()
}
func (e *Encoder) encodeHeader(idx *Index) error {
return binary.Write(e.w,
indexSignature,
idx.Version,
uint32(len(idx.Entries)),
)
}
func (e *Encoder) encodeEntries(idx *Index) error {
sort.Sort(byName(idx.Entries))
for _, entry := range idx.Entries {
if err := e.encodeEntry(entry); err != nil {
return err
}
wrote := entryHeaderLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
}
return nil
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
}
msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
if err != nil {
return err
}
flags := uint16(entry.Stage&0x3) << 12
if l := len(entry.Name); l < nameMask {
flags |= uint16(l)
} else {
flags |= nameMask
}
flow := []interface{}{
sec, nsec,
msec, mnsec,
entry.Dev,
entry.Inode,
entry.Mode,
entry.UID,
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
if err := binary.Write(e.w, flow...); err != nil {
return err
}
return binary.Write(e.w, []byte(entry.Name))
}
func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
if t.IsZero() {
return 0, 0, nil
}
if t.Unix() < 0 || t.UnixNano() < 0 {
return 0, 0, ErrInvalidTimestamp
}
return uint32(t.Unix()), uint32(t.Nanosecond()), nil
}
func (e *Encoder) padEntry(wrote int) error {
padLen := 8 - wrote%8
_, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
return err
}
func (e *Encoder) encodeFooter() error {
return binary.Write(e.w, e.hash.Sum(nil))
}
type byName []*Entry
func (l byName) Len() int { return len(l) }
func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name }
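// An illustrative encoding sketch (assuming a version 2 *index.Index named
// idx and an io.Writer named w):
//
//	e := index.NewEncoder(w)
//	if err := e.Encode(idx); err != nil {
//		// ErrUnsupportedVersion unless idx is a plain version 2 index
//	}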

View File

@ -0,0 +1,213 @@
package index
import (
"bytes"
"errors"
"fmt"
"path/filepath"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)
var (
// ErrUnsupportedVersion is returned by Decode when the index file version
// is not supported.
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
ErrEntryNotFound = errors.New("entry not found")
indexSignature = []byte{'D', 'I', 'R', 'C'}
treeExtSignature = []byte{'T', 'R', 'E', 'E'}
resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
)
// Stage during merge
type Stage int
const (
// Merged is the default stage, fully merged
Merged Stage = 1
// AncestorMode is the base revision
AncestorMode Stage = 1
// OurMode is the first tree revision, ours
OurMode Stage = 2
// TheirMode is the second tree revision, theirs
TheirMode Stage = 3
)
// Index contains the information about which objects are currently checked
// out in the worktree, along with metadata about the working files. Changes
// in the worktree are detected using this Index. The Index is also used
// during merges
type Index struct {
// Version is index version
Version uint32
// Entries is the collection of entries represented by this Index. The order
// of this collection is not guaranteed
Entries []*Entry
// Cache represents the 'Cached tree' extension
Cache *Tree
// ResolveUndo represents the 'Resolve undo' extension
ResolveUndo *ResolveUndo
// EndOfIndexEntry represents the 'End of Index Entry' extension
EndOfIndexEntry *EndOfIndexEntry
}
// Add creates a new Entry and returns it. The caller should first check that
// another entry with the same path does not exist.
func (i *Index) Add(path string) *Entry {
e := &Entry{
Name: filepath.ToSlash(path),
}
i.Entries = append(i.Entries, e)
return e
}
// Entry returns the entry that matches the given path, if any.
func (i *Index) Entry(path string) (*Entry, error) {
path = filepath.ToSlash(path)
for _, e := range i.Entries {
if e.Name == path {
return e, nil
}
}
return nil, ErrEntryNotFound
}
// Remove removes the entry that matches the given path and returns the
// deleted entry.
func (i *Index) Remove(path string) (*Entry, error) {
path = filepath.ToSlash(path)
for index, e := range i.Entries {
if e.Name == path {
i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
return e, nil
}
}
return nil, ErrEntryNotFound
}
// Glob returns all entries matching pattern, or nil if there is no matching
// entry. The syntax of patterns is the same as in filepath.Glob.
func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
pattern = filepath.ToSlash(pattern)
for _, e := range i.Entries {
m, err := match(pattern, e.Name)
if err != nil {
return nil, err
}
if m {
matches = append(matches, e)
}
}
return
}
// String is equivalent to `git ls-files --stage --debug`
func (i *Index) String() string {
buf := bytes.NewBuffer(nil)
for _, e := range i.Entries {
buf.WriteString(e.String())
}
return buf.String()
}
// Entry represents a single file (or stage of a file) in the cache. An entry
// represents exactly one stage of a file. If a file path is unmerged then
// multiple Entry instances may appear for the same path name.
type Entry struct {
// Hash is the SHA1 of the represented file
Hash plumbing.Hash
// Name is the Entry path name relative to top level directory
Name string
// CreatedAt time when the tracked path was created
CreatedAt time.Time
// ModifiedAt time when the tracked path was changed
ModifiedAt time.Time
// Dev and Inode of the tracked path
Dev, Inode uint32
// Mode of the path
Mode filemode.FileMode
// UID and GID, userid and group id of the owner
UID, GID uint32
// Size is the length in bytes for regular files
Size uint32
// Stage, during a merge, defines which stage this entry represents
// https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
Stage Stage
// SkipWorktree used in sparse checkouts
// https://git-scm.com/docs/git-read-tree#_sparse_checkout
SkipWorktree bool
// IntentToAdd records only the fact that the path will be added later
// https://git-scm.com/docs/git-add ("git add -N")
IntentToAdd bool
}
func (e Entry) String() string {
buf := bytes.NewBuffer(nil)
fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name)
fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond())
fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond())
fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode)
fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID)
fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0)
return buf.String()
}
// Tree contains pre-computed hashes for trees that can be derived from the
// index. It helps speed up tree object generation from index for a new commit.
type Tree struct {
Entries []TreeEntry
}
// TreeEntry is an entry of a cached Tree
type TreeEntry struct {
// Path component (relative to its parent directory)
Path string
// Entries is the number of entries in the index that is covered by the tree
// this entry represents.
Entries int
// Trees is the number of subtrees this tree has
Trees int
// Hash object name for the object that would result from writing this span
// of index as a tree.
Hash plumbing.Hash
}
// ResolveUndo represents the 'Resolve undo' extension. When a conflict is
// resolved (e.g. with "git add path"), the higher stage entries are removed
// and a stage-0 entry with the proper resolution is added; the removed
// higher stage entries are saved in this extension.
type ResolveUndo struct {
Entries []ResolveUndoEntry
}
// ResolveUndoEntry contains the information about a conflict when it is
// resolved
type ResolveUndoEntry struct {
Path string
Stages map[Stage]plumbing.Hash
}
// EndOfIndexEntry is the End of Index Entry (EOIE) extension, used to locate
// the end of the variable length index entries and the beginning of the
// extensions. Code can take advantage of this to quickly locate the index
// extensions without having to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
type EndOfIndexEntry struct {
// Offset to the end of the index entries
Offset uint32
// Hash is a SHA-1 over the extension types and their sizes (but not
// their contents).
Hash plumbing.Hash
}

View File

@ -0,0 +1,186 @@
package index
import (
"path/filepath"
"runtime"
"unicode/utf8"
)
// match is filepath.Match with support for matching full paths, not only
// filenames
// code from:
// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224
func match(pattern, name string) (matched bool, err error) {
Pattern:
for len(pattern) > 0 {
var star bool
var chunk string
star, chunk, pattern = scanChunk(pattern)
// Look for match at current position.
t, ok, err := matchChunk(chunk, name)
// if we're the last chunk, make sure we've exhausted the name
// otherwise we'll give a false result even if we could still match
// using the star
if ok && (len(t) == 0 || len(pattern) > 0) {
name = t
continue
}
if err != nil {
return false, err
}
if star {
// Look for match skipping i+1 bytes.
// Unlike filepath.Match, '*' may skip '/' here, so patterns can
// match full paths.
for i := 0; i < len(name); i++ {
t, ok, err := matchChunk(chunk, name[i+1:])
if ok {
// if we're the last chunk, make sure we exhausted the name
if len(pattern) == 0 && len(t) > 0 {
continue
}
name = t
continue Pattern
}
if err != nil {
return false, err
}
}
}
return false, nil
}
return len(name) == 0, nil
}
// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
for len(pattern) > 0 && pattern[0] == '*' {
pattern = pattern[1:]
star = true
}
inrange := false
var i int
Scan:
for i = 0; i < len(pattern); i++ {
switch pattern[i] {
case '\\':
if runtime.GOOS != "windows" {
// error check handled in matchChunk: bad pattern.
if i+1 < len(pattern) {
i++
}
}
case '[':
inrange = true
case ']':
inrange = false
case '*':
if !inrange {
break Scan
}
}
}
return star, pattern[0:i], pattern[i:]
}
// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
for len(chunk) > 0 {
if len(s) == 0 {
return
}
switch chunk[0] {
case '[':
// character class
r, n := utf8.DecodeRuneInString(s)
s = s[n:]
chunk = chunk[1:]
// We can't end right after '[', we're expecting at least
// a closing bracket and possibly a caret.
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
// possibly negated
negated := chunk[0] == '^'
if negated {
chunk = chunk[1:]
}
// parse all ranges
match := false
nrange := 0
for {
if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
chunk = chunk[1:]
break
}
var lo, hi rune
if lo, chunk, err = getEsc(chunk); err != nil {
return
}
hi = lo
if chunk[0] == '-' {
if hi, chunk, err = getEsc(chunk[1:]); err != nil {
return
}
}
if lo <= r && r <= hi {
match = true
}
nrange++
}
if match == negated {
return
}
case '?':
_, n := utf8.DecodeRuneInString(s)
s = s[n:]
chunk = chunk[1:]
case '\\':
if runtime.GOOS != "windows" {
chunk = chunk[1:]
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
}
fallthrough
default:
if chunk[0] != s[0] {
return
}
s = s[1:]
chunk = chunk[1:]
}
}
return s, true, nil
}
// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
err = filepath.ErrBadPattern
return
}
if chunk[0] == '\\' && runtime.GOOS != "windows" {
chunk = chunk[1:]
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
}
r, n := utf8.DecodeRuneInString(chunk)
if r == utf8.RuneError && n == 1 {
err = filepath.ErrBadPattern
}
nchunk = chunk[n:]
if len(nchunk) == 0 {
err = filepath.ErrBadPattern
}
return
}

View File

@ -0,0 +1,2 @@
// Package objfile implements encoding and decoding of object files.
package objfile

View File

@ -0,0 +1,114 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)
var (
ErrClosed = errors.New("objfile: already closed")
ErrHeader = errors.New("objfile: invalid header")
ErrNegativeSize = errors.New("objfile: negative object size")
)
// Reader reads and decodes compressed objfile data from a provided io.Reader.
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
}, nil
}
// Header reads the type and size of the object, and prepares the reader to
// read the object's contents.
func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
var raw []byte
raw, err = r.readUntil(' ')
if err != nil {
return
}
t, err = plumbing.ParseObjectType(string(raw))
if err != nil {
return
}
raw, err = r.readUntil(0)
if err != nil {
return
}
size, err = strconv.ParseInt(string(raw), 10, 64)
if err != nil {
err = ErrHeader
return
}
defer r.prepareForRead(t, size)
return
}
// readUntil reads one byte at a time from the zlib stream until it
// encounters delim or an error.
func (r *Reader) readUntil(delim byte) ([]byte, error) {
var buf [1]byte
value := make([]byte, 0, 16)
for {
if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
if err == io.EOF {
return nil, ErrHeader
}
return nil, err
}
if buf[0] == delim {
return value, nil
}
value = append(value, buf[0])
}
}
func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
r.hasher = plumbing.NewHasher(t, size)
r.multi = io.TeeReader(r.zlib, r.hasher)
}
// Read reads up to len(p) bytes into p from the object data stream. It returns
// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
// if Read returns n < len(p), it may use all of p as scratch space during the
// call.
//
// If Read encounters the end of the data stream it will return err == io.EOF,
// either in the current call if n > 0 or in a subsequent call.
func (r *Reader) Read(p []byte) (n int, err error) {
return r.multi.Read(p)
}
// Hash returns the hash of the object data stream that has been read so far.
func (r *Reader) Hash() plumbing.Hash {
return r.hasher.Sum()
}
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
}
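// An illustrative reading sketch (assuming an io.Reader named r that carries
// a zlib-compressed loose object):
//
//	or, err := objfile.NewReader(r)
//	if err != nil {
//		// handle error
//	}
//	t, size, err := or.Header()
//	data, err := ioutil.ReadAll(or) // the object's size bytes of content
//	sum := or.Hash()                // object id, valid once all data is read
//	err = or.Close()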

View File

@ -0,0 +1,109 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"gopkg.in/src-d/go-git.v4/plumbing"
)
var (
ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
)
// Writer writes and encodes data in compressed objfile format to a provided
// io.Writer. Close should be called when finished with the Writer. Close will
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
closed bool
pending int64 // number of unwritten bytes
}
// NewWriter returns a new Writer writing to w.
//
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
}
}
// WriteHeader writes the type and the size and prepares to accept the object's
// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a
// negative size is provided, ErrNegativeSize is returned.
func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error {
if !t.Valid() {
return plumbing.ErrInvalidType
}
if size < 0 {
return ErrNegativeSize
}
b := t.Bytes()
b = append(b, ' ')
b = append(b, []byte(strconv.FormatInt(size, 10))...)
b = append(b, 0)
defer w.prepareForWrite(t, size)
_, err := w.zlib.Write(b)
return err
}
func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) {
w.pending = size
w.hasher = plumbing.NewHasher(t, size)
w.multi = io.MultiWriter(w.zlib, w.hasher)
}
// Write writes the object's contents. Write returns the error ErrOverflow if
// more than size bytes are written after WriteHeader.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.closed {
return 0, ErrClosed
}
overwrite := false
if int64(len(p)) > w.pending {
p = p[0:w.pending]
overwrite = true
}
n, err = w.multi.Write(p)
w.pending -= int64(n)
if err == nil && overwrite {
err = ErrOverflow
return
}
return
}
// Hash returns the hash of the object data stream that has been written so far.
// It can be called before or after Close.
func (w *Writer) Hash() plumbing.Hash {
return w.hasher.Sum() // Not yet closed, return hash of data written so far
}
// Close releases any resources consumed by the Writer.
//
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
if err := w.zlib.Close(); err != nil {
return err
}
w.closed = true
return nil
}
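// An illustrative writing sketch (assuming an io.Writer named out and a
// []byte named payload with the object's content):
//
//	w := objfile.NewWriter(out)
//	if err := w.WriteHeader(plumbing.BlobObject, int64(len(payload))); err != nil {
//		// handle error
//	}
//	if _, err := w.Write(payload); err != nil {
//		// ErrOverflow if more than the declared size is written
//	}
//	sum := w.Hash() // object id over the header plus content
//	_ = w.Close()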

View File

@ -0,0 +1,68 @@
package packfile
import (
"bytes"
"io"
"sync"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
var signature = []byte{'P', 'A', 'C', 'K'}
const (
// VersionSupported is the packfile version supported by this package
VersionSupported uint32 = 2
firstLengthBits = uint8(4) // the first byte of an object header has 4 bits to store the length
lengthBits = uint8(7) // subsequent bytes have 7 bits to store the length
maskFirstLength = 15 // 0000 1111
maskContinue = 0x80 // 1000 0000
maskLength = uint8(127) // 0111 1111
maskType = uint8(112) // 0111 0000
)
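// For example (illustrative): a first header byte of 0x95 (1001 0101) has
// the continuation bit set (maskContinue), object type (0x95&maskType)>>4
// == 1 (a commit), and initial length bits 0x95&maskFirstLength == 5.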
// UpdateObjectStorage updates the storer with the objects in the given
// packfile.
func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
if pw, ok := s.(storer.PackfileWriter); ok {
return WritePackfileToObjectStorage(pw, packfile)
}
p, err := NewParserWithStorage(NewScanner(packfile), s)
if err != nil {
return err
}
_, err = p.Parse()
return err
}
// WritePackfileToObjectStorage writes all the packfile objects into the given
// object storage.
func WritePackfileToObjectStorage(
sw storer.PackfileWriter,
packfile io.Reader,
) (err error) {
w, err := sw.PackfileWriter()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
var n int64
n, err = io.Copy(w, packfile)
if err == nil && n == 0 {
return ErrEmptyPackfile
}
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
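// An illustrative usage sketch (assuming a storer.Storer named s and an
// io.Reader named r that carries a complete packfile):
//
//	if err := packfile.UpdateObjectStorage(s, r); err != nil {
//		// handle error
//	}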

View File

@ -0,0 +1,297 @@
package packfile
const blksz = 16
const maxChainLength = 64
// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current
// design.
type deltaIndex struct {
table []int
entries []int
mask int
}
func (idx *deltaIndex) init(buf []byte) {
scanner := newDeltaIndexScanner(buf, len(buf))
idx.mask = scanner.mask
idx.table = scanner.table
idx.entries = make([]int, countEntries(scanner)+1)
idx.copyEntries(scanner)
}
// findMatch returns the offset within src where the block starting at
// tgtOffset is found, and the length of the match. A length of 0 means there
// was no match. A length of -1 means src is shorter than blksz. Any other
// positive length is the length of the match in bytes.
func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) {
if len(tgt) < tgtOffset+s {
return 0, len(tgt) - tgtOffset
}
if len(src) < blksz {
return 0, -1
}
if len(tgt) >= tgtOffset+s && len(src) >= blksz {
h := hashBlock(tgt, tgtOffset)
tIdx := h & idx.mask
eIdx := idx.table[tIdx]
if eIdx != 0 {
srcOffset = idx.entries[eIdx]
} else {
return
}
l = matchLength(src, tgt, tgtOffset, srcOffset)
}
return
}
func matchLength(src, tgt []byte, otgt, osrc int) (l int) {
lensrc := len(src)
lentgt := len(tgt)
for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] {
l++
osrc++
otgt++
}
return
}
func countEntries(scan *deltaIndexScanner) (cnt int) {
// Figure out exactly how many entries we need. As we do the
// enumeration truncate any delta chains longer than what we
// are willing to scan during encode. This keeps the encode
// logic linear in the size of the input rather than quadratic.
for i := 0; i < len(scan.table); i++ {
h := scan.table[i]
if h == 0 {
continue
}
size := 0
for {
size++
if size == maxChainLength {
scan.next[h] = 0
break
}
h = scan.next[h]
if h == 0 {
break
}
}
cnt += size
}
return
}
func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) {
// Rebuild the entries list from the scanner, positioning all
// blocks in the same hash chain next to each other. We can
// then later discard the next list, along with the scanner.
//
next := 1
for i := 0; i < len(idx.table); i++ {
h := idx.table[i]
if h == 0 {
continue
}
idx.table[i] = next
for {
idx.entries[next] = scanner.entries[h]
next++
h = scanner.next[h]
if h == 0 {
break
}
}
}
}
type deltaIndexScanner struct {
table []int
entries []int
next []int
mask int
count int
}
func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner {
size -= size % blksz
worstCaseBlockCnt := size / blksz
if worstCaseBlockCnt < 1 {
return new(deltaIndexScanner)
}
tableSize := tableSize(worstCaseBlockCnt)
scanner := &deltaIndexScanner{
table: make([]int, tableSize),
mask: tableSize - 1,
entries: make([]int, worstCaseBlockCnt+1),
next: make([]int, worstCaseBlockCnt+1),
}
scanner.scan(buf, size)
return scanner
}
// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries
// instead of the entries and the key, so we avoid operations to retrieve the offset later, as
// we don't use the key.
// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java
func (s *deltaIndexScanner) scan(buf []byte, end int) {
lastHash := 0
ptr := end - blksz
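// Walk the buffer backwards one block at a time; each block is pushed onto
// the head of its hash chain, so the earliest (lowest) offsets end up at
// the head and are preferred by findMatch.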
for {
key := hashBlock(buf, ptr)
tIdx := key & s.mask
head := s.table[tIdx]
if head != 0 && lastHash == key {
s.entries[head] = ptr
} else {
s.count++
eIdx := s.count
s.entries[eIdx] = ptr
s.next[eIdx] = head
s.table[tIdx] = eIdx
}
lastHash = key
ptr -= blksz
if 0 > ptr {
break
}
}
}
func tableSize(worstCaseBlockCnt int) int {
shift := 32 - leadingZeros(uint32(worstCaseBlockCnt))
sz := 1 << uint(shift-1)
if sz < worstCaseBlockCnt {
sz <<= 1
}
return sz
}
// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future
func leadingZeros(x uint32) (n int) {
if x >= 1<<16 {
x >>= 16
n = 16
}
if x >= 1<<8 {
x >>= 8
n += 8
}
n += int(len8tab[x])
return 32 - n
}
var len8tab = [256]uint8{
0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
}
func hashBlock(raw []byte, ptr int) int {
// The first 4 steps collapse into a 4-byte big-endian decode, with a
// larger right shift since the left shifts were combined together.
//
hash := ((uint32(raw[ptr]) & 0xff) << 24) |
((uint32(raw[ptr+1]) & 0xff) << 16) |
((uint32(raw[ptr+2]) & 0xff) << 8) |
(uint32(raw[ptr+3]) & 0xff)
hash ^= T[hash>>31]
hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23]
hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23]
return int(hash)
}
var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577,
0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99,
0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45,
0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c,
0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895,
0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd,
0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f,
0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181,
0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e,
0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770,
0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d,
0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5,
0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c,
0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084,
0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558,
0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6,
0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788,
0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66,
0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba,
0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c,
0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105,
0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d,
0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990,
0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e,
0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61,
0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f,
0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f,
0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17,
0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e,
0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7,
0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b,
0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5,
0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4,
0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a,
0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96,
0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df,
0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46,
0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e,
0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62,
0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c,
0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93,
0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d,
0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680,
0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8,
0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071,
0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657,
0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b,
0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965,
0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b,
0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5,
0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69,
0xe4fe0d44, 0x4d736b1e, 0x99b5d833,
}

View File

@ -0,0 +1,369 @@
package packfile
import (
"sort"
"sync"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
const (
// maxDepth limits how many delta-on-delta steps a chain may have.
// 50 is the default value used in JGit.
maxDepth = int64(50)
)
// applyDelta is the set of object types to which we should apply deltas.
var applyDelta = map[plumbing.ObjectType]bool{
plumbing.BlobObject: true,
plumbing.TreeObject: true,
}
type deltaSelector struct {
storer storer.EncodedObjectStorer
}
func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector {
return &deltaSelector{s}
}
// ObjectsToPack creates a list of ObjectToPack from the provided hashes,
// creating deltas when suitable, according to its internal logic.
// `packWindow` specifies the size of the sliding window used to compare
// objects for delta compression; 0 turns off delta compression entirely.
func (dw *deltaSelector) ObjectsToPack(
hashes []plumbing.Hash,
packWindow uint,
) ([]*ObjectToPack, error) {
otp, err := dw.objectsToPack(hashes, packWindow)
if err != nil {
return nil, err
}
if packWindow == 0 {
return otp, nil
}
dw.sort(otp)
var objectGroups [][]*ObjectToPack
var prev *ObjectToPack
i := -1
for _, obj := range otp {
if prev == nil || prev.Type() != obj.Type() {
objectGroups = append(objectGroups, []*ObjectToPack{obj})
i++
prev = obj
} else {
objectGroups[i] = append(objectGroups[i], obj)
}
}
var wg sync.WaitGroup
var once sync.Once
for _, objs := range objectGroups {
objs := objs
wg.Add(1)
go func() {
if walkErr := dw.walk(objs, packWindow); walkErr != nil {
once.Do(func() {
err = walkErr
})
}
wg.Done()
}()
}
wg.Wait()
if err != nil {
return nil, err
}
return otp, nil
}
func (dw *deltaSelector) objectsToPack(
hashes []plumbing.Hash,
packWindow uint,
) ([]*ObjectToPack, error) {
var objectsToPack []*ObjectToPack
for _, h := range hashes {
var o plumbing.EncodedObject
var err error
if packWindow == 0 {
o, err = dw.encodedObject(h)
} else {
o, err = dw.encodedDeltaObject(h)
}
if err != nil {
return nil, err
}
otp := newObjectToPack(o)
if _, ok := o.(plumbing.DeltaObject); ok {
otp.CleanOriginal()
}
objectsToPack = append(objectsToPack, otp)
}
if packWindow == 0 {
return objectsToPack, nil
}
if err := dw.fixAndBreakChains(objectsToPack); err != nil {
return nil, err
}
return objectsToPack, nil
}
func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
edos, ok := dw.storer.(storer.DeltaObjectStorer)
if !ok {
return dw.encodedObject(h)
}
return edos.DeltaObject(plumbing.AnyObject, h)
}
func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
return dw.storer.EncodedObject(plumbing.AnyObject, h)
}
func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error {
m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack))
for _, otp := range objectsToPack {
m[otp.Hash()] = otp
}
for _, otp := range objectsToPack {
if err := dw.fixAndBreakChainsOne(m, otp); err != nil {
return err
}
}
return nil
}
func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error {
if !otp.Object.Type().IsDelta() {
return nil
}
// Initial ObjectToPack instances might have a delta assigned to Object
// but no Base set yet. Once Base has been assigned to a delta, it means
// we already fixed it.
if otp.Base != nil {
return nil
}
do, ok := otp.Object.(plumbing.DeltaObject)
if !ok {
// if this is not a DeltaObject, then we cannot retrieve its base,
// so we have to break the delta chain here.
return dw.undeltify(otp)
}
base, ok := objectsToPack[do.BaseHash()]
if !ok {
// The base of the delta is not in our list of objects to pack, so
// we break the chain.
return dw.undeltify(otp)
}
if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
return err
}
otp.SetDelta(base, otp.Object)
return nil
}
func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
if otp.Original != nil {
return nil
}
if !otp.Object.Type().IsDelta() {
return nil
}
obj, err := dw.encodedObject(otp.Hash())
if err != nil {
return err
}
otp.SetOriginal(obj)
return nil
}
// undeltify undeltifies an *ObjectToPack by retrieving the original object from
// the storer and resetting it.
func (dw *deltaSelector) undeltify(otp *ObjectToPack) error {
if err := dw.restoreOriginal(otp); err != nil {
return err
}
otp.Object = otp.Original
otp.Depth = 0
return nil
}
func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) {
sort.Sort(byTypeAndSize(objectsToPack))
}
func (dw *deltaSelector) walk(
objectsToPack []*ObjectToPack,
packWindow uint,
) error {
indexMap := make(map[plumbing.Hash]*deltaIndex)
for i := 0; i < len(objectsToPack); i++ {
// Clean up the index map and reconstructed delta objects for anything
// outside our pack window, to save memory.
if i > int(packWindow) {
obj := objectsToPack[i-int(packWindow)]
delete(indexMap, obj.Hash())
if obj.IsDelta() {
obj.SaveOriginalMetadata()
obj.CleanOriginal()
}
}
target := objectsToPack[i]
// If we already have a delta, we don't try to find a new one for this
// object. This happens when a delta is set to be reused from an existing
// packfile.
if target.IsDelta() {
continue
}
// We only want to create deltas from specific types.
if !applyDelta[target.Type()] {
continue
}
for j := i - 1; j >= 0 && i-j < int(packWindow); j-- {
base := objectsToPack[j]
// Objects must use only the same type as their delta base.
// Since objectsToPack is sorted by type and size, once we find
// a different type, we know we won't find more of them.
if base.Type() != target.Type() {
break
}
if err := dw.tryToDeltify(indexMap, base, target); err != nil {
return err
}
}
}
return nil
}
func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error {
// Original object might not be present if we're reusing a delta, so we
// ensure it is restored.
if err := dw.restoreOriginal(target); err != nil {
return err
}
if err := dw.restoreOriginal(base); err != nil {
return err
}
// If the sizes are radically different, this is a bad pairing.
if target.Size() < base.Size()>>4 {
return nil
}
msz := dw.deltaSizeLimit(
target.Object.Size(),
base.Depth,
target.Depth,
target.IsDelta(),
)
// Nearly impossible to fit useful delta.
if msz <= 8 {
return nil
}
// If we have to insert a lot to make this work, find another.
if base.Size()-target.Size() > msz {
return nil
}
if _, ok := indexMap[base.Hash()]; !ok {
indexMap[base.Hash()] = new(deltaIndex)
}
// Now we can generate the delta using originals
delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original)
if err != nil {
return err
}
// if delta better than target
if delta.Size() < msz {
target.SetDelta(base, delta)
}
return nil
}
func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int,
targetDepth int, targetDelta bool) int64 {
if !targetDelta {
// Any delta should be no more than 50% of the original size
// (for text files deflate of whole form should shrink 50%).
n := targetSize >> 1
// Evenly distribute delta size limits over allowed depth.
// If src is non-delta (depth = 0), delta <= 50% of original.
// If src is almost at limit (9/10), delta <= 10% of original.
return n * (maxDepth - int64(baseDepth)) / maxDepth
}
// With a delta base chosen any new delta must be "better".
// Retain the distribution described above.
d := int64(targetDepth)
n := targetSize
// If the target depth has already reached maxDepth, this delta is not suitable to be used.
if d >= maxDepth {
return 0
}
// If src is whole (depth=0) and base is near limit (depth=9/10)
// any delta using src can be 10x larger and still be better.
//
// If src is near limit (depth=9/10) and base is whole (depth=0)
// a new delta dependent on src must be 1/10th the size.
return n * (maxDepth - int64(baseDepth)) / (maxDepth - d)
}
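// For example, for a non-delta target of size 1000 the limit is
// 1000>>1 = 500 when the base is a full object (baseDepth = 0), but only
// 500*5/50 = 50 when the base is already at depth 45, so deep chains must
// produce much smaller deltas to be accepted.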
type byTypeAndSize []*ObjectToPack
func (a byTypeAndSize) Len() int { return len(a) }
func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byTypeAndSize) Less(i, j int) bool {
if a[i].Type() < a[j].Type() {
return false
}
if a[i].Type() > a[j].Type() {
return true
}
return a[i].Size() > a[j].Size()
}
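// In practice this sorts objects by descending type value and, within a
// type, by descending size, so the larger objects of each type come first
// and are tried as delta bases for the smaller ones that follow them in
// the window.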

View File

@ -0,0 +1,201 @@
package packfile
import (
"bytes"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for more info
const (
// Standard chunk size used to generate fingerprints
s = 16
// https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
// Max size of a copy operation (64KB)
maxCopySize = 64 * 1024
)
// GetDelta returns an EncodedObject of type OFSDeltaObject. Both the base
// and the target object will be loaded into memory in order to create the
// delta object. To regenerate the target you will need the resulting delta
// object and the base one. An error is returned if the base or target
// object cannot be read.
func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
return getDelta(new(deltaIndex), base, target)
}
func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
br, err := base.Reader()
if err != nil {
return nil, err
}
defer br.Close()
tr, err := target.Reader()
if err != nil {
return nil, err
}
defer tr.Close()
bb := bufPool.Get().(*bytes.Buffer)
bb.Reset()
defer bufPool.Put(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
tb.Reset()
defer bufPool.Put(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
return nil, err
}
db := diffDelta(index, bb.Bytes(), tb.Bytes())
delta := &plumbing.MemoryObject{}
_, err = delta.Write(db)
if err != nil {
return nil, err
}
delta.SetSize(int64(len(db)))
delta.SetType(plumbing.OFSDeltaObject)
return delta, nil
}
// DiffDelta returns the delta that transforms src into tgt.
func DiffDelta(src, tgt []byte) []byte {
return diffDelta(new(deltaIndex), src, tgt)
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
if len(index.entries) == 0 {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
ibuf.Reset()
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)
if l == 0 {
// couldn't find a match, just write the current byte and continue
ibuf.WriteByte(tgt[i])
} else if l < 0 {
// src is less than blksz, copy the rest of the target to avoid
// calls to findMatch
for ; i < len(tgt); i++ {
ibuf.WriteByte(tgt[i])
}
} else if l < s {
// remaining target is less than blksz, copy what's left of it
// and avoid calls to findMatch
for j := i; j < i+l; j++ {
ibuf.WriteByte(tgt[j])
}
i += l - 1
} else {
encodeInsertOperation(ibuf, buf)
rl := l
aOffset := offset
for rl > 0 {
if rl < maxCopySize {
buf.Write(encodeCopyOperation(aOffset, rl))
break
}
buf.Write(encodeCopyOperation(aOffset, maxCopySize))
rl -= maxCopySize
aOffset += maxCopySize
}
i += l - 1
}
}
encodeInsertOperation(ibuf, buf)
bytes := buf.Bytes()
bufPool.Put(buf)
bufPool.Put(ibuf)
return bytes
}
func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
if ibuf.Len() == 0 {
return
}
b := ibuf.Bytes()
s := ibuf.Len()
o := 0
for {
if s <= 127 {
break
}
buf.WriteByte(byte(127))
buf.Write(b[o : o+127])
s -= 127
o += 127
}
buf.WriteByte(byte(s))
buf.Write(b[o : o+s])
ibuf.Reset()
}
func deltaEncodeSize(size int) []byte {
var ret []byte
c := size & 0x7f
size >>= 7
for {
if size == 0 {
break
}
ret = append(ret, byte(c|0x80))
c = size & 0x7f
size >>= 7
}
ret = append(ret, byte(c))
return ret
}
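// For example, deltaEncodeSize(300) emits [0xAC, 0x02]: 300 = 0b10_0101100,
// so the low 7 bits (0x2C) go first with the continuation bit set (0xAC),
// followed by the remaining bits (0x02) in a final byte without it.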
func encodeCopyOperation(offset, length int) []byte {
code := 0x80
var opcodes []byte
var i uint
for i = 0; i < 4; i++ {
f := 0xff << (i * 8)
if offset&f != 0 {
opcodes = append(opcodes, byte(offset&f>>(i*8)))
code |= 0x01 << i
}
}
for i = 0; i < 3; i++ {
f := 0xff << (i * 8)
if length&f != 0 {
opcodes = append(opcodes, byte(length&f>>(i*8)))
code |= 0x10 << i
}
}
return append([]byte{byte(code)}, opcodes...)
}
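// For example, encodeCopyOperation(0x1234, 0x100) returns
// [0xB3, 0x34, 0x12, 0x01]: bits 0 and 1 of the code flag the two non-zero
// offset bytes (0x34, 0x12), and bit 5 flags the single non-zero length
// byte (0x01), so code = 0x80|0x01|0x02|0x20 = 0xB3.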

View File

@ -0,0 +1,39 @@
// Package packfile implements encoding and decoding of packfile format.
//
// == pack-*.pack files have the following format:
//
// - A header appears at the beginning and consists of the following:
//
// 4-byte signature:
// The signature is: {'P', 'A', 'C', 'K'}
//
// 4-byte version number (network byte order):
// GIT currently accepts version number 2 or 3 but
// generates version 2 only.
//
// 4-byte number of objects contained in the pack (network byte order)
//
// Observation: we cannot have more than 4G versions ;-) and
// more than 4G objects in a pack.
//
// - The header is followed by number of object entries, each of
// which looks like this:
//
// (undeltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// compressed data
//
// (deltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// 20-byte base object name
// compressed delta data
//
// Observation: length of each object is encoded in a variable
// length format and is not constrained to 32-bit or anything.
//
// - The trailer records 20-byte SHA1 checksum of all of the above.
//
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
package packfile

View File

@ -0,0 +1,219 @@
package packfile
import (
"compress/zlib"
"crypto/sha1"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
// Encoder gets the data from the storage and writes it into the writer in
// PACK format.
type Encoder struct {
selector *deltaSelector
w *offsetWriter
zw *zlib.Writer
hasher plumbing.Hasher
useRefDeltas bool
}
// NewEncoder creates a new packfile encoder using a specific Writer and
// EncodedObjectStorer. By default deltas used to generate the packfile will be
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)
zw := zlib.NewWriter(mw)
return &Encoder{
selector: newDeltaSelector(s),
w: ow,
zw: zw,
hasher: h,
useRefDeltas: useRefDeltas,
}
}
// Encode creates a packfile containing all the objects referenced in
// hashes and writes it to the writer in the Encoder. `packWindow`
// specifies the size of the sliding window used to compare objects
// for delta compression; 0 turns off delta compression entirely.
func (e *Encoder) Encode(
hashes []plumbing.Hash,
packWindow uint,
) (plumbing.Hash, error) {
objects, err := e.selector.ObjectsToPack(hashes, packWindow)
if err != nil {
return plumbing.ZeroHash, err
}
return e.encode(objects)
}
func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) {
if err := e.head(len(objects)); err != nil {
return plumbing.ZeroHash, err
}
for _, o := range objects {
if err := e.entry(o); err != nil {
return plumbing.ZeroHash, err
}
}
return e.footer()
}
func (e *Encoder) head(numEntries int) error {
return binary.Write(
e.w,
signature,
int32(VersionSupported),
int32(numEntries),
)
}
func (e *Encoder) entry(o *ObjectToPack) error {
if o.WantWrite() {
// A cycle exists in this delta chain. This should only occur if a
// selected object representation disappeared during writing
// (for example due to a concurrent repack) and a different base
// was chosen, forcing a cycle. Select something other than a
// delta, and write this object.
e.selector.restoreOriginal(o)
o.BackToOriginal()
}
if o.IsWritten() {
return nil
}
o.MarkWantWrite()
if err := e.writeBaseIfDelta(o); err != nil {
return err
}
// We need to check if we already wrote this object due to a cyclic delta chain.
if o.IsWritten() {
return nil
}
o.Offset = e.w.Offset()
if o.IsDelta() {
if err := e.writeDeltaHeader(o); err != nil {
return err
}
} else {
if err := e.entryHead(o.Type(), o.Size()); err != nil {
return err
}
}
e.zw.Reset(e.w)
or, err := o.Object.Reader()
if err != nil {
return err
}
_, err = io.Copy(e.zw, or)
if err != nil {
return err
}
return e.zw.Close()
}
func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
if o.IsDelta() && !o.Base.IsWritten() {
// We must write base first
return e.entry(o.Base)
}
return nil
}
func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error {
// Write offset deltas by default
t := plumbing.OFSDeltaObject
if e.useRefDeltas {
t = plumbing.REFDeltaObject
}
if err := e.entryHead(t, o.Object.Size()); err != nil {
return err
}
if e.useRefDeltas {
return e.writeRefDeltaHeader(o.Base.Hash())
} else {
return e.writeOfsDeltaHeader(o)
}
}
func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error {
return binary.Write(e.w, base)
}
func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error {
// for OFS_DELTA, offset of the base is interpreted as negative offset
// relative to the type-byte of the header of the ofs-delta entry.
relativeOffset := o.Offset - o.Base.Offset
if relativeOffset <= 0 {
return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
}
return binary.WriteVariableWidthInt(e.w, relativeOffset)
}
func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error {
t := int64(typeNum)
header := []byte{}
c := (t << firstLengthBits) | (size & maskFirstLength)
size >>= firstLengthBits
for {
if size == 0 {
break
}
header = append(header, byte(c|maskContinue))
c = size & int64(maskLength)
size >>= lengthBits
}
header = append(header, byte(c))
_, err := e.w.Write(header)
return err
}
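// As a worked example, assuming the usual constant values (4 length bits in
// the first byte, 7 in each continuation byte, 0x80 as the continuation
// mask, matching the header format described in the package documentation):
// entryHead(plumbing.CommitObject, 100) writes [0x94, 0x06] — continuation
// bit, type 001, low length bits 0100, then the remaining length bits
// 0000110.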
func (e *Encoder) footer() (plumbing.Hash, error) {
h := e.hasher.Sum()
return h, binary.Write(e.w, h)
}
type offsetWriter struct {
w io.Writer
offset int64
}
func newOffsetWriter(w io.Writer) *offsetWriter {
return &offsetWriter{w: w}
}
func (ow *offsetWriter) Write(p []byte) (n int, err error) {
n, err = ow.w.Write(p)
ow.offset += int64(n)
return n, err
}
func (ow *offsetWriter) Offset() int64 {
return ow.offset
}
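// A minimal usage sketch (examplePack is a hypothetical helper): pack the
// given hashes into w. `s` is any storer.EncodedObjectStorer; a window of
// 10 objects is used here, the default window size used by git itself.
func examplePack(w io.Writer, s storer.EncodedObjectStorer, hashes []plumbing.Hash) (plumbing.Hash, error) {
	// false: emit OFS deltas rather than REF deltas
	e := NewEncoder(w, s, false)
	return e.Encode(hashes, 10)
}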

View File

@ -0,0 +1,30 @@
package packfile
import "fmt"
// Error specifies errors returned during packfile parsing.
type Error struct {
reason, details string
}
// NewError returns a new error.
func NewError(reason string) *Error {
return &Error{reason: reason}
}
// Error returns a text representation of the error.
func (e *Error) Error() string {
if e.details == "" {
return e.reason
}
return fmt.Sprintf("%s: %s", e.reason, e.details)
}
// AddDetails adds details to an error, with additional text.
func (e *Error) AddDetails(format string, args ...interface{}) *Error {
return &Error{
reason: e.reason,
details: fmt.Sprintf(format, args...),
}
}
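// For example, a parse error with context can be built as:
//
//	ErrInvalidObject.AddDetails("type %q", h.Type)
//
// which keeps the original reason and renders as
// "invalid git object: type ...".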

View File

@ -0,0 +1,116 @@
package packfile
import (
"io"
billy "gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
}
// NewFSObject creates a new filesystem object.
func NewFSObject(
hash plumbing.Hash,
finalType plumbing.ObjectType,
offset int64,
contentSize int64,
index idxfile.Index,
fs billy.Filesystem,
path string,
cache cache.Object,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
}
}
// Reader implements the plumbing.EncodedObject interface.
func (o *FSObject) Reader() (io.ReadCloser, error) {
obj, ok := o.cache.Get(o.hash)
if ok && obj != o {
reader, err := obj.Reader()
if err != nil {
return nil, err
}
return reader, nil
}
f, err := o.fs.Open(o.path)
if err != nil {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
return nil, err
}
if err := f.Close(); err != nil {
return nil, err
}
return r, nil
}
// SetSize implements the plumbing.EncodedObject interface. This method
// is a noop.
func (o *FSObject) SetSize(int64) {}
// SetType implements the plumbing.EncodedObject interface. This method is
// a noop.
func (o *FSObject) SetType(plumbing.ObjectType) {}
// Hash implements the plumbing.EncodedObject interface.
func (o *FSObject) Hash() plumbing.Hash { return o.hash }
// Size implements the plumbing.EncodedObject interface.
func (o *FSObject) Size() int64 { return o.size }
// Type implements the plumbing.EncodedObject interface.
func (o *FSObject) Type() plumbing.ObjectType {
return o.typ
}
// Writer implements the plumbing.EncodedObject interface. This method always
// returns a nil writer.
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@ -0,0 +1,164 @@
package packfile
import (
"gopkg.in/src-d/go-git.v4/plumbing"
)
// ObjectToPack is a representation of an object that is going to be written
// into a packfile.
type ObjectToPack struct {
// The main object to pack, it could be any object, including deltas
Object plumbing.EncodedObject
// Base is the object that a delta is based on (which could also be another
// delta). If the main object is not a delta, Base will be nil.
Base *ObjectToPack
// Original is the object that we can generate applying the delta to
// Base, or the same object as Object in the case of a non-delta
// object.
Original plumbing.EncodedObject
// Depth is the number of deltas that must be resolved to obtain Original
// (delta based on delta based on ...).
Depth int
// Offset in the pack once the object has been written, 1 while it is
// being written (see MarkWantWrite), or 0 if it has not been written yet.
Offset int64
// Information from the original object
resolvedOriginal bool
originalType plumbing.ObjectType
originalSize int64
originalHash plumbing.Hash
}
// newObjectToPack creates a correct ObjectToPack based on a non-delta object
func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack {
return &ObjectToPack{
Object: o,
Original: o,
}
}
// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on
// its base (which could be another delta), the delta target (here called original),
// and the delta Object itself.
func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack {
return &ObjectToPack{
Object: delta,
Base: base,
Original: original,
Depth: base.Depth + 1,
}
}
// BackToOriginal converts this ObjectToPack back to a non-deltified object if it was a delta.
func (o *ObjectToPack) BackToOriginal() {
if o.IsDelta() && o.Original != nil {
o.Object = o.Original
o.Base = nil
o.Depth = 0
}
}
// IsWritten reports whether this ObjectToPack has already been written
// into the packfile.
func (o *ObjectToPack) IsWritten() bool {
return o.Offset > 1
}
// MarkWantWrite marks this ObjectToPack as WantWrite
// to avoid delta chain loops
func (o *ObjectToPack) MarkWantWrite() {
o.Offset = 1
}
// WantWrite checks if this ObjectToPack was marked as WantWrite before
func (o *ObjectToPack) WantWrite() bool {
return o.Offset == 1
}
// SetOriginal sets Original and saves its size, type and hash. If obj is
// nil, Original is set to nil but the previously resolved values are kept.
func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) {
o.Original = obj
o.SaveOriginalMetadata()
}
// SaveOriginalMetadata saves size, type and hash of Original object
func (o *ObjectToPack) SaveOriginalMetadata() {
if o.Original != nil {
o.originalSize = o.Original.Size()
o.originalType = o.Original.Type()
o.originalHash = o.Original.Hash()
o.resolvedOriginal = true
}
}
// CleanOriginal sets Original to nil
func (o *ObjectToPack) CleanOriginal() {
o.Original = nil
}
func (o *ObjectToPack) Type() plumbing.ObjectType {
if o.Original != nil {
return o.Original.Type()
}
if o.resolvedOriginal {
return o.originalType
}
if o.Base != nil {
return o.Base.Type()
}
if o.Object != nil {
return o.Object.Type()
}
panic("cannot get type")
}
func (o *ObjectToPack) Hash() plumbing.Hash {
if o.Original != nil {
return o.Original.Hash()
}
if o.resolvedOriginal {
return o.originalHash
}
do, ok := o.Object.(plumbing.DeltaObject)
if ok {
return do.ActualHash()
}
panic("cannot get hash")
}
func (o *ObjectToPack) Size() int64 {
if o.Original != nil {
return o.Original.Size()
}
if o.resolvedOriginal {
return o.originalSize
}
do, ok := o.Object.(plumbing.DeltaObject)
if ok {
return do.ActualSize()
}
panic("cannot get ObjectToPack size")
}
func (o *ObjectToPack) IsDelta() bool {
return o.Base != nil
}
func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) {
o.Object = delta
o.Base = base
o.Depth = base.Depth + 1
}

View File

@ -0,0 +1,487 @@
package packfile
import (
"bytes"
"io"
"os"
billy "gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
var (
// ErrInvalidObject is returned by Decode when an invalid object is
// found in the packfile.
ErrInvalidObject = NewError("invalid git object")
// ErrZLib is returned by Decode when there was an error unzipping
// the packfile contents.
ErrZLib = NewError("zlib reading error")
)
// When reading small objects from packfile it is beneficial to do so at
// once to exploit the buffered I/O. In many cases the objects are so small
// that they were already loaded to memory when the object header was
// loaded from the packfile. Wrapping in FSObject would cause this buffered
// data to be thrown away and then re-read later, with the additional
// seeking causing reloads from disk. Objects smaller than this threshold
// are now always read into memory and stored in cache instead of being
// wrapped in FSObject.
const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfileWithCache(
index idxfile.Index,
fs billy.Filesystem,
file billy.File,
cache cache.Object,
) *Packfile {
s := NewScanner(file)
return &Packfile{
index,
fs,
file,
s,
cache,
make(map[int64]plumbing.ObjectType),
}
}
// NewPackfile returns a packfile representation for the given packfile file
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
}
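// A minimal usage sketch (illustrative names: idx is a parsed idxfile.Index
// and f the opened pack file):
//
//	p := NewPackfile(idx, fs, f)
//	defer p.Close()
//	obj, err := p.Get(hash)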
// Get retrieves the encoded object in the packfile with the given hash.
func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
offset, err := p.FindOffset(h)
if err != nil {
return nil, err
}
return p.GetByOffset(offset)
}
// GetByOffset retrieves the encoded object from the packfile with the given
// offset.
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
hash, err := p.FindHash(o)
if err == nil {
if obj, ok := p.deltaBaseCache.Get(hash); ok {
return obj, nil
}
}
return p.objectAtOffset(o)
}
// GetSizeByOffset retrieves the size of the encoded object from the
// packfile with the given offset.
func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
if _, err := p.s.SeekFromStart(o); err != nil {
if err == io.EOF || isInvalid(err) {
return 0, plumbing.ErrObjectNotFound
}
return 0, err
}
h, err := p.nextObjectHeader()
if err != nil {
return 0, err
}
return p.getObjectSize(h)
}
func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
h, err := p.s.SeekObjectHeader(offset)
p.s.pendingObject = nil
return h, err
}
func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
h, err := p.s.NextObjectHeader()
p.s.pendingObject = nil
return h, err
}
func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
}
delta := buf.Bytes()
_, delta = decodeLEB128(delta) // skip src size
sz, _ := decodeLEB128(delta)
return int64(sz), nil
default:
return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Type, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
var offset int64
if h.Type == plumbing.REFDeltaObject {
offset, err = p.FindOffset(h.Reference)
if err != nil {
return
}
} else {
offset = h.OffsetReference
}
if baseType, ok := p.offsetToType[offset]; ok {
typ = baseType
} else {
h, err = p.objectHeaderAtOffset(offset)
if err != nil {
return
}
typ, err = p.getObjectType(h)
if err != nil {
return
}
}
default:
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
return
}
func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
if err == io.EOF || isInvalid(err) {
return nil, plumbing.ErrObjectNotFound
}
return nil, err
}
// If we have no filesystem, we will return a MemoryObject instead
// of an FSObject.
if p.fs == nil {
return p.getNextObject(h)
}
// If the object is not a delta and it's small enough then read it
// completely into memory now since it is already read from disk
// into buffer anyway.
if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
return p.getNextObject(h)
}
hash, err := p.FindHash(h.Offset)
if err != nil {
return nil, err
}
size, err := p.getObjectSize(h)
if err != nil {
return nil, err
}
typ, err := p.getObjectType(h)
if err != nil {
return nil, err
}
p.offsetToType[h.Offset] = typ
return NewFSObject(
hash,
typ,
h.Offset,
size,
p.Index,
p.fs,
p.file.Name(),
p.deltaBaseCache,
), nil
}
func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
ref, err := p.FindHash(offset)
if err == nil {
obj, ok := p.cacheGet(ref)
if ok {
reader, err := obj.Reader()
if err != nil {
return nil, err
}
return reader, nil
}
}
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
return nil, err
}
obj, err := p.getNextObject(h)
if err != nil {
return nil, err
}
return obj.Reader()
}
func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
obj.SetType(h.Type)
var err error
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
err = p.fillRegularObjectContent(obj)
case plumbing.REFDeltaObject:
err = p.fillREFDeltaObjectContent(obj, h.Reference)
case plumbing.OFSDeltaObject:
err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
default:
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}
if err != nil {
return nil, err
}
return obj, nil
}
func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
w, err := obj.Writer()
if err != nil {
return err
}
_, _, err = p.s.NextObject(w)
p.cachePut(obj)
return err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
base, ok := p.cacheGet(ref)
if !ok {
base, err = p.Get(ref)
if err != nil {
return err
}
}
obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
bufPool.Put(buf)
return err
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bytes.NewBuffer(nil)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}
var base plumbing.EncodedObject
var ok bool
hash, err := p.FindHash(offset)
if err == nil {
base, ok = p.cacheGet(hash)
}
if !ok {
base, err = p.GetByOffset(offset)
if err != nil {
return err
}
}
obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
return err
}
func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
if p.deltaBaseCache == nil {
return nil, false
}
return p.deltaBaseCache.Get(h)
}
func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
if p.deltaBaseCache == nil {
return
}
p.deltaBaseCache.Put(obj)
}
// GetAll returns an iterator with all encoded objects in the packfile.
// The iterator returned is not thread-safe, it should be used in the same
// thread as the Packfile instance.
func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
return p.GetByType(plumbing.AnyObject)
}
// GetByType returns all the objects of the given type.
func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
switch typ {
case plumbing.AnyObject,
plumbing.BlobObject,
plumbing.TreeObject,
plumbing.CommitObject,
plumbing.TagObject:
entries, err := p.EntriesByOffset()
if err != nil {
return nil, err
}
return &objectIter{
// Easiest way to provide an object decoder is just to pass a Packfile
// instance. To avoid interfering with seeks, it is a new instance with a
// different scanner but the same cache and offset-to-hash map, reusing
// as much cache as possible.
p: p,
iter: entries,
typ: typ,
}, nil
default:
return nil, plumbing.ErrInvalidType
}
}
// ID returns the ID of the packfile, which is the checksum at the end of it.
func (p *Packfile) ID() (plumbing.Hash, error) {
prev, err := p.file.Seek(-20, io.SeekEnd)
if err != nil {
return plumbing.ZeroHash, err
}
var hash plumbing.Hash
if _, err := io.ReadFull(p.file, hash[:]); err != nil {
return plumbing.ZeroHash, err
}
if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
return plumbing.ZeroHash, err
}
return hash, nil
}
// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
if !ok {
return nil
}
return closer.Close()
}
type objectIter struct {
p *Packfile
typ plumbing.ObjectType
iter idxfile.EntryIter
}
func (i *objectIter) Next() (plumbing.EncodedObject, error) {
for {
e, err := i.iter.Next()
if err != nil {
return nil, err
}
obj, err := i.p.GetByOffset(int64(e.Offset))
if err != nil {
return nil, err
}
if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
return obj, nil
}
}
}
func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
for {
o, err := i.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := f(o); err != nil {
return err
}
}
}
func (i *objectIter) Close() {
i.iter.Close()
}
// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
// error inside. It also checks for the windows error, which is different from
// os.ErrInvalid.
func isInvalid(err error) bool {
pe, ok := err.(*os.PathError)
if !ok {
return false
}
errstr := pe.Err.Error()
return errstr == errInvalidUnix || errstr == errInvalidWindows
}
// errInvalidWindows is the Windows equivalent to os.ErrInvalid
const errInvalidWindows = "The parameter is incorrect."
var errInvalidUnix = os.ErrInvalid.Error()

View File

@ -0,0 +1,483 @@
package packfile
import (
"bytes"
"errors"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
var (
// ErrReferenceDeltaNotFound is returned when the reference delta is not
// found.
ErrReferenceDeltaNotFound = errors.New("reference delta not found")
// ErrNotSeekableSource is returned when the source for the parser is not
// seekable and a storage was not provided, so it can't be parsed.
ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")
// ErrDeltaNotCached is returned when the delta could not be found in cache.
ErrDeltaNotCached = errors.New("delta could not be found in cache")
)
// Observer interface is implemented by index encoders.
type Observer interface {
// OnHeader is called when a new packfile is opened.
OnHeader(count uint32) error
// OnInflatedObjectHeader is called for each object header read.
OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
// OnInflatedObjectContent is called for each decoded object.
OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
// OnFooter is called when decoding is done.
OnFooter(h plumbing.Hash) error
}
// Parser decodes a packfile and calls any observer associated with it. It is
// used to generate indexes.
type Parser struct {
storage storer.EncodedObjectStorer
scanner *Scanner
count uint32
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
// delta content by offset, only used if source is not seekable
deltas map[int64][]byte
ob []Observer
}
// NewParser creates a new Parser. The Scanner source must be seekable.
// If it's not, NewParserWithStorage should be used instead.
func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
return NewParserWithStorage(scanner, nil, ob...)
}
// NewParserWithStorage creates a new Parser. The scanner source must either
// be seekable or a storage must be provided.
func NewParserWithStorage(
scanner *Scanner,
storage storer.EncodedObjectStorer,
ob ...Observer,
) (*Parser, error) {
if !scanner.IsSeekable && storage == nil {
return nil, ErrNotSeekableSource
}
var deltas map[int64][]byte
if !scanner.IsSeekable {
deltas = make(map[int64][]byte)
}
return &Parser{
storage: storage,
scanner: scanner,
ob: ob,
count: 0,
cache: cache.NewBufferLRUDefault(),
deltas: deltas,
}, nil
}
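// A minimal usage sketch (illustrative; any Observer implementation works,
// index encoders being the typical ones):
//
//	scanner := NewScanner(packReader)
//	parser, err := NewParser(scanner, observer)
//	if err != nil {
//		return err
//	}
//	checksum, err := parser.Parse()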
func (p *Parser) forEachObserver(f func(o Observer) error) error {
for _, o := range p.ob {
if err := f(o); err != nil {
return err
}
}
return nil
}
func (p *Parser) onHeader(count uint32) error {
return p.forEachObserver(func(o Observer) error {
return o.OnHeader(count)
})
}
func (p *Parser) onInflatedObjectHeader(
t plumbing.ObjectType,
objSize int64,
pos int64,
) error {
return p.forEachObserver(func(o Observer) error {
return o.OnInflatedObjectHeader(t, objSize, pos)
})
}
func (p *Parser) onInflatedObjectContent(
h plumbing.Hash,
pos int64,
crc uint32,
content []byte,
) error {
return p.forEachObserver(func(o Observer) error {
return o.OnInflatedObjectContent(h, pos, crc, content)
})
}
func (p *Parser) onFooter(h plumbing.Hash) error {
return p.forEachObserver(func(o Observer) error {
return o.OnFooter(h)
})
}
// Parse starts the decoding phase of the packfile.
func (p *Parser) Parse() (plumbing.Hash, error) {
if err := p.init(); err != nil {
return plumbing.ZeroHash, err
}
if err := p.indexObjects(); err != nil {
return plumbing.ZeroHash, err
}
var err error
p.checksum, err = p.scanner.Checksum()
if err != nil && err != io.EOF {
return plumbing.ZeroHash, err
}
if err := p.resolveDeltas(); err != nil {
return plumbing.ZeroHash, err
}
if err := p.onFooter(p.checksum); err != nil {
return plumbing.ZeroHash, err
}
return p.checksum, nil
}
func (p *Parser) init() error {
_, c, err := p.scanner.Header()
if err != nil {
return err
}
if err := p.onHeader(c); err != nil {
return err
}
p.count = c
p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
p.oiByOffset = make(map[int64]*objectInfo, p.count)
p.oi = make([]*objectInfo, p.count)
return nil
}
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
}
delta := false
var ota *objectInfo
switch t := oh.Type; t {
case plumbing.OFSDeltaObject:
delta = true
parent, ok := p.oiByOffset[oh.OffsetReference]
if !ok {
return plumbing.ErrObjectNotFound
}
ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
parent.Children = append(parent.Children, ota)
case plumbing.REFDeltaObject:
delta = true
parent, ok := p.oiByHash[oh.Reference]
if !ok {
// can't find referenced object in this pack file
// this must be a "thin" pack.
parent = &objectInfo{ //Placeholder parent
SHA1: oh.Reference,
ExternalRef: true, // mark as an external reference that must be resolved
Type: plumbing.AnyObject,
DiskType: plumbing.AnyObject,
}
p.oiByHash[oh.Reference] = parent
}
ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
parent.Children = append(parent.Children, ota)
default:
ota = newBaseObject(oh.Offset, oh.Length, t)
}
_, crc, err := p.scanner.NextObject(buf)
if err != nil {
return err
}
ota.Crc32 = crc
ota.Length = oh.Length
data := buf.Bytes()
if !delta {
sha1, err := getSHA1(ota.Type, data)
if err != nil {
return err
}
ota.SHA1 = sha1
p.oiByHash[ota.SHA1] = ota
}
if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}
if delta && !p.scanner.IsSeekable {
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}
p.oiByOffset[oh.Offset] = ota
p.oi[i] = ota
}
return nil
}
func (p *Parser) resolveDeltas() error {
for _, obj := range p.oi {
content, err := p.get(obj)
if err != nil {
return err
}
if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}
if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
return err
}
if !obj.IsDelta() && len(obj.Children) > 0 {
for _, child := range obj.Children {
if _, err := p.resolveObject(child, content); err != nil {
return err
}
}
// Remove the delta from the cache.
if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
delete(p.deltas, obj.Offset)
}
}
}
return nil
}
func (p *Parser) get(o *objectInfo) (b []byte, err error) {
var ok bool
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok = p.cache.Get(o.Offset)
}
// If it's not in the cache and is not a delta, we can try to find it in the
// storage, if there is one. External refs must enter here.
if !ok && p.storage != nil && !o.Type.IsDelta() {
e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
if err != nil {
return nil, err
}
o.Type = e.Type()
r, err := e.Reader()
if err != nil {
return nil, err
}
b = make([]byte, e.Size())
if _, err = r.Read(b); err != nil {
return nil, err
}
}
if b != nil {
return b, nil
}
if o.ExternalRef {
// we were not able to resolve a ref in a thin pack
return nil, ErrReferenceDeltaNotFound
}
var data []byte
if o.DiskType.IsDelta() {
base, err := p.get(o.Parent)
if err != nil {
return nil, err
}
data, err = p.resolveObject(o, base)
if err != nil {
return nil, err
}
} else {
data, err = p.readData(o)
if err != nil {
return nil, err
}
}
if len(o.Children) > 0 {
p.cache.Put(o.Offset, data)
}
return data, nil
}
func (p *Parser) resolveObject(
o *objectInfo,
base []byte,
) ([]byte, error) {
if !o.DiskType.IsDelta() {
return nil, nil
}
data, err := p.readData(o)
if err != nil {
return nil, err
}
data, err = applyPatchBase(o, data, base)
if err != nil {
return nil, err
}
if p.storage != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
return nil, err
}
if _, err := p.storage.SetEncodedObject(obj); err != nil {
return nil, err
}
}
return data, nil
}
func (p *Parser) readData(o *objectInfo) ([]byte, error) {
if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
data, ok := p.deltas[o.Offset]
if !ok {
return nil, ErrDeltaNotCached
}
return data, nil
}
if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
return nil, err
}
buf := new(bytes.Buffer)
if _, _, err := p.scanner.NextObject(buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
patched, err := PatchDelta(base, data)
if err != nil {
return nil, err
}
if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type
sha1, err := getSHA1(ota.Type, patched)
if err != nil {
return nil, err
}
ota.SHA1 = sha1
ota.Length = int64(len(patched))
}
return patched, nil
}
func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
hasher := plumbing.NewHasher(t, int64(len(data)))
if _, err := hasher.Write(data); err != nil {
return plumbing.ZeroHash, err
}
return hasher.Sum(), nil
}
type objectInfo struct {
Offset int64
Length int64
Type plumbing.ObjectType
DiskType plumbing.ObjectType
ExternalRef bool // indicates this is an external reference in a thin pack file
Crc32 uint32
Parent *objectInfo
Children []*objectInfo
SHA1 plumbing.Hash
}
func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
return newDeltaObject(offset, length, t, nil)
}
func newDeltaObject(
offset, length int64,
t plumbing.ObjectType,
parent *objectInfo,
) *objectInfo {
obj := &objectInfo{
Offset: offset,
Length: length,
Type: t,
DiskType: t,
Crc32: 0,
Parent: parent,
}
return obj
}
func (o *objectInfo) IsDelta() bool {
return o.Type.IsDelta()
}
func (o *objectInfo) Size() int64 {
return o.Length
}

View File

@ -0,0 +1,229 @@
package packfile
import (
"errors"
"io/ioutil"
"gopkg.in/src-d/go-git.v4/plumbing"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.
const deltaSizeMin = 4
// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
r, err := base.Reader()
if err != nil {
return err
}
w, err := target.Writer()
if err != nil {
return err
}
src, err := ioutil.ReadAll(r)
if err != nil {
return err
}
dst, err := PatchDelta(src, delta)
if err != nil {
return err
}
target.SetSize(int64(len(dst)))
_, err = w.Write(dst)
return err
}
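// A round-trip sketch: a delta produced by DiffDelta in this package can be
// applied back with PatchDelta.
//
//	delta := DiffDelta(src, tgt)
//	out, err := PatchDelta(src, delta)
//	// on success, bytes.Equal(out, tgt) holds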
var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)
// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if the delta is corrupted (ErrInvalidDelta) or if a
// command is neither copy-from-source nor copy-from-delta (ErrDeltaCmd).
func PatchDelta(src, delta []byte) ([]byte, error) {
if len(delta) < deltaSizeMin {
return nil, ErrInvalidDelta
}
srcSz, delta := decodeLEB128(delta)
if srcSz != uint(len(src)) {
return nil, ErrInvalidDelta
}
targetSz, delta := decodeLEB128(delta)
remainingTargetSz := targetSz
var cmd byte
dest := make([]byte, 0, targetSz)
for {
if len(delta) == 0 {
return nil, ErrInvalidDelta
}
cmd = delta[0]
delta = delta[1:]
if isCopyFromSrc(cmd) {
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
if err != nil {
return nil, err
}
sz, delta, err = decodeSize(cmd, delta)
if err != nil {
return nil, err
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
break
}
dest = append(dest, src[offset:offset+sz]...)
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return nil, ErrInvalidDelta
}
if uint(len(delta)) < sz {
return nil, ErrInvalidDelta
}
dest = append(dest, delta[0:sz]...)
remainingTargetSz -= sz
delta = delta[sz:]
} else {
return nil, ErrDeltaCmd
}
if remainingTargetSz <= 0 {
break
}
}
return dest, nil
}
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
//
// This must be called twice on the delta data buffer, first to get the
// expected source buffer size, and again to get the target buffer size.
func decodeLEB128(input []byte) (uint, []byte) {
var num, sz uint
var b byte
for {
b = input[sz]
num |= (uint(b) & payload) << (sz * 7) // concatenate 7-bit chunks
sz++
if uint(b)&continuation == 0 || sz == uint(len(input)) {
break
}
}
return num, input[sz:]
}
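// For example, decodeLEB128([]byte{0xAC, 0x02}) returns 300: the first byte
// contributes its low 7 bits (0x2C = 44), the set continuation bit pulls in
// the next byte, which adds 2<<7 = 256, and the clear high bit stops the
// loop.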
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)
func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0
}
func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
}
return offset, delta, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
}
if sz == 0 {
sz = 0x10000
}
return sz, delta, nil
}
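// Note that a copy command encoding a size of zero decodes as 0x10000
// (64KiB), git's convention for the maximum copy size, matching
// maxCopySize used by the delta encoder in this package.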
func invalidSize(sz, targetSz uint) bool {
return sz > targetSz
}
func invalidOffsetSize(offset, sz, srcSz uint) bool {
return sumOverflows(offset, sz) ||
offset+sz > srcSz
}
func sumOverflows(a, b uint) bool {
return a+b < a
}

View File

@ -0,0 +1,487 @@
package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
var (
// ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile
ErrEmptyPackfile = NewError("empty packfile")
// ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect.
ErrBadSignature = NewError("malformed pack file signature")
// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
// different than VersionSupported.
ErrUnsupportedVersion = NewError("unsupported packfile version")
// ErrSeekNotSupported is returned if seeking is not supported.
ErrSeekNotSupported = NewError("not seek support")
)
// ObjectHeader contains information about an object; it is collected from
// the bytes preceding the object's content.
type ObjectHeader struct {
Type plumbing.ObjectType
Offset int64
Length int64
Reference plumbing.Hash
OffsetReference int64
}
type Scanner struct {
r reader
zr readerResetter
crc hash.Hash32
// pendingObject is used to detect whether an object has been read, or is
// still waiting to be read.
pendingObject *ObjectHeader
version, objects uint32
// IsSeekable reports whether this scanner can Seek; for a Scanner to be
// seekable, r must implement io.Seeker.
IsSeekable bool
}
// NewScanner returns a new Scanner based on a reader; if the given reader
// implements io.ReadSeeker, the Scanner will also be Seekable.
func NewScanner(r io.Reader) *Scanner {
seeker, ok := r.(io.ReadSeeker)
if !ok {
seeker = &trackableReader{Reader: r}
}
crc := crc32.NewIEEE()
return &Scanner{
r: newTeeReader(newByteReadSeeker(seeker), crc),
crc: crc,
IsSeekable: ok,
}
}
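// A minimal usage sketch (illustrative; r is an io.Reader over pack data):
//
//	s := NewScanner(r)
//	_, count, _ := s.Header()
//	for i := uint32(0); i < count; i++ {
//		oh, _ := s.NextObjectHeader()
//		_, crc, _ := s.NextObject(stdioutil.Discard)
//		// oh.Type, oh.Length and crc describe the entry just read
//	}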
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
func (s *Scanner) Header() (version, objects uint32, err error) {
if s.version != 0 {
return s.version, s.objects, nil
}
sig, err := s.readSignature()
if err != nil {
if err == io.EOF {
err = ErrEmptyPackfile
}
return
}
if !s.isValidSignature(sig) {
err = ErrBadSignature
return
}
version, err = s.readVersion()
s.version = version
if err != nil {
return
}
if !s.isSupportedVersion(version) {
err = ErrUnsupportedVersion.AddDetails("%d", version)
return
}
objects, err = s.readCount()
s.objects = objects
return
}
// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
return []byte{}, err
}
return sig, nil
}
// isValidSignature returns whether sig is a valid packfile signature.
func (s *Scanner) isValidSignature(sig []byte) bool {
return bytes.Equal(sig, signature)
}
// readVersion reads and returns the version field of a packfile.
func (s *Scanner) readVersion() (uint32, error) {
return binary.ReadUint32(s.r)
}
// isSupportedVersion returns whether version v is supported by the parser.
// The current supported version is VersionSupported, defined above.
func (s *Scanner) isSupportedVersion(v uint32) bool {
return v == VersionSupported
}
// readCount reads and returns the count of objects field of a packfile.
func (s *Scanner) readCount() (uint32, error) {
return binary.ReadUint32(s.r)
}
// SeekObjectHeader seeks to the specified offset and returns the ObjectHeader
// for the next object in the reader
func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}
if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
return nil, err
}
h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}
h.Offset = offset
return h, nil
}
// NextObjectHeader returns the ObjectHeader for the next object in the reader
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
if err := s.doPending(); err != nil {
return nil, err
}
offset, err := s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}
h.Offset = offset
return h, nil
}
// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
defer s.Flush()
s.crc.Reset()
h := &ObjectHeader{}
s.pendingObject = h
var err error
h.Offset, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}
h.Type, h.Length, err = s.readObjectTypeAndLength()
if err != nil {
return nil, err
}
switch h.Type {
case plumbing.OFSDeltaObject:
no, err := binary.ReadVariableWidthInt(s.r)
if err != nil {
return nil, err
}
h.OffsetReference = h.Offset - no
case plumbing.REFDeltaObject:
var err error
h.Reference, err = binary.ReadHash(s.r)
if err != nil {
return nil, err
}
}
return h, nil
}
func (s *Scanner) doPending() error {
if s.version == 0 {
var err error
s.version, s.objects, err = s.Header()
if err != nil {
return err
}
}
return s.discardObjectIfNeeded()
}
func (s *Scanner) discardObjectIfNeeded() error {
if s.pendingObject == nil {
return nil
}
h := s.pendingObject
n, _, err := s.NextObject(stdioutil.Discard)
if err != nil {
return err
}
if n != h.Length {
return fmt.Errorf(
"error discarding object, discarded %d, expected %d",
n, h.Length,
)
}
return nil
}
// readObjectTypeAndLength reads and returns the object type and the
// length field from an object entry in a packfile.
func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
t, c, err := s.readType()
if err != nil {
return t, 0, err
}
l, err := s.readLength(c)
return t, l, err
}
func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
var c byte
var err error
if c, err = s.r.ReadByte(); err != nil {
return plumbing.ObjectType(0), 0, err
}
typ := parseType(c)
return typ, c, nil
}
func parseType(b byte) plumbing.ObjectType {
return plumbing.ObjectType((b & maskType) >> firstLengthBits)
}
// the length is encoded in the last 4 bits of the first byte and in
// the last 7 bits of subsequent bytes. The last byte has a 0 MSB.
func (s *Scanner) readLength(first byte) (int64, error) {
length := int64(first & maskFirstLength)
c := first
shift := firstLengthBits
var err error
for c&maskContinue > 0 {
if c, err = s.r.ReadByte(); err != nil {
return 0, err
}
length += int64(c&maskLength) << shift
shift += lengthBits
}
return length, nil
}
// NextObject writes the content of the next object into the writer, and
// returns the number of bytes written, the CRC32 of the content and an error, if any
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
defer s.crc.Reset()
s.pendingObject = nil
written, err = s.copyObject(w)
s.Flush()
crc32 = s.crc.Sum32()
return
}
// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
if s.zr == nil {
var zr io.ReadCloser
zr, err = zlib.NewReader(s.r)
if err != nil {
return 0, fmt.Errorf("zlib initialization error: %s", err)
}
s.zr = zr.(readerResetter)
} else {
if err = s.zr.Reset(s.r, nil); err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
}
defer ioutil.CheckClose(s.zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, s.zr, buf)
byteSlicePool.Put(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}
previous, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return -1, err
}
_, err = s.r.Seek(offset, io.SeekStart)
return previous, err
}
// Checksum returns the checksum of the packfile
func (s *Scanner) Checksum() (plumbing.Hash, error) {
err := s.discardObjectIfNeeded()
if err != nil {
return plumbing.ZeroHash, err
}
return binary.ReadHash(s.r)
}
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
return err
}
// Flush finishes writing the buffer to the crc hasher in case we are using
// a teeReader. Otherwise it is a no-op.
func (s *Scanner) Flush() error {
tee, ok := s.r.(*teeReader)
if ok {
return tee.Flush()
}
return nil
}
type trackableReader struct {
count int64
io.Reader
}
// Read reads up to len(p) bytes into p.
func (r *trackableReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.count += int64(n)
return
}
// Seek only supports io.SeekCurrent; any other operation fails
func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
if whence != io.SeekCurrent {
return -1, ErrSeekNotSupported
}
return r.count, nil
}
func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
return &bufferedSeeker{
r: r,
Reader: *bufio.NewReader(r),
}
}
type bufferedSeeker struct {
r io.ReadSeeker
bufio.Reader
}
func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekCurrent && offset == 0 {
current, err := r.r.Seek(offset, whence)
if err != nil {
return current, err
}
return current - int64(r.Buffered()), nil
}
defer r.Reader.Reset(r.r)
return r.r.Seek(offset, whence)
}
type readerResetter interface {
io.ReadCloser
zlib.Resetter
}
type reader interface {
io.Reader
io.ByteReader
io.Seeker
}
type teeReader struct {
reader
w hash.Hash32
bufWriter *bufio.Writer
}
func newTeeReader(r reader, h hash.Hash32) *teeReader {
return &teeReader{
reader: r,
w: h,
bufWriter: bufio.NewWriter(h),
}
}
func (r *teeReader) Read(p []byte) (n int, err error) {
r.Flush()
n, err = r.reader.Read(p)
if n > 0 {
if n, err := r.w.Write(p[:n]); err != nil {
return n, err
}
}
return
}
func (r *teeReader) ReadByte() (b byte, err error) {
b, err = r.reader.ReadByte()
if err == nil {
return b, r.bufWriter.WriteByte(b)
}
return
}
func (r *teeReader) Flush() (err error) {
return r.bufWriter.Flush()
}
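// Illustrative sketch (an addition for clarity, not part of the vendored
// source): a typical read loop over a packfile using the Scanner. Object
// payloads are discarded here; a real consumer would pass its own writer.
func exampleScanPackfile(r io.Reader) error {
	s := NewScanner(r)
	_, objects, err := s.Header()
	if err != nil {
		return err
	}
	for i := uint32(0); i < objects; i++ {
		if _, err := s.NextObjectHeader(); err != nil {
			return err
		}
		if _, _, err := s.NextObject(stdioutil.Discard); err != nil {
			return err
		}
	}
	_, err = s.Checksum()
	return err
}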


@ -0,0 +1,122 @@
// Package pktline implements reading payloads from pkt-lines and encoding
// pkt-lines from payloads.
package pktline
import (
"bytes"
"errors"
"fmt"
"io"
)
// An Encoder writes pkt-lines to an output stream.
type Encoder struct {
w io.Writer
}
const (
// MaxPayloadSize is the maximum payload size of a pkt-line in bytes.
MaxPayloadSize = 65516
// For compatibility with canonical Git implementation, accept longer pkt-lines
OversizePayloadMax = 65520
)
var (
// FlushPkt are the contents of a flush-pkt pkt-line.
FlushPkt = []byte{'0', '0', '0', '0'}
// Flush is the payload to use with the Encode method to encode a flush-pkt.
Flush = []byte{}
// FlushString is the payload to use with the EncodeString method to encode a flush-pkt.
FlushString = ""
// ErrPayloadTooLong is returned by the Encode methods when any of the
// provided payloads is bigger than MaxPayloadSize.
ErrPayloadTooLong = errors.New("payload is too long")
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: w,
}
}
// Flush encodes a flush-pkt to the output stream.
func (e *Encoder) Flush() error {
_, err := e.w.Write(FlushPkt)
return err
}
// Encode encodes a pkt-line with the payload specified and writes it to
// the output stream. If several payloads are specified, each of them
// will be streamed in its own pkt-line.
func (e *Encoder) Encode(payloads ...[]byte) error {
for _, p := range payloads {
if err := e.encodeLine(p); err != nil {
return err
}
}
return nil
}
func (e *Encoder) encodeLine(p []byte) error {
if len(p) > MaxPayloadSize {
return ErrPayloadTooLong
}
if bytes.Equal(p, Flush) {
return e.Flush()
}
n := len(p) + 4
if _, err := e.w.Write(asciiHex16(n)); err != nil {
return err
}
_, err := e.w.Write(p)
return err
}
// Returns the hexadecimal ascii representation of the 16 least
// significant bits of n. The length of the returned slice will always
// be 4. Example: if n is 1234 (0x4d2), the return value will be
// []byte{'0', '4', 'd', '2'}.
func asciiHex16(n int) []byte {
var ret [4]byte
ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12))
ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8))
ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4))
ret[3] = byteToASCIIHex(byte(n & 0x000f))
return ret[:]
}
// turns a byte into its hexadecimal ascii representation. Example:
// from 11 (0xb) to 'b'.
func byteToASCIIHex(n byte) byte {
if n < 10 {
return '0' + n
}
return 'a' - 10 + n
}
// EncodeString works similarly to Encode but payloads are specified as strings.
func (e *Encoder) EncodeString(payloads ...string) error {
for _, p := range payloads {
if err := e.Encode([]byte(p)); err != nil {
return err
}
}
return nil
}
// Encodef encodes a single pkt-line with the payload formatted as
// the format specifier. The rest of the arguments will be used in
// the format string.
func (e *Encoder) Encodef(format string, a ...interface{}) error {
return e.EncodeString(
fmt.Sprintf(format, a...),
)
}
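// Illustrative sketch (an addition for clarity, not part of the vendored
// source): encoding two payloads followed by a flush-pkt. "hello\n" is 6
// bytes, so its pkt-line is "000ahello\n" (6 + 4 = 10 = 0x000a).
func exampleEncodePktLines(w io.Writer) error {
	e := NewEncoder(w)
	if err := e.EncodeString("hello\n", "world\n"); err != nil {
		return err
	}
	return e.Flush()
}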


@ -0,0 +1,134 @@
package pktline
import (
"errors"
"io"
)
const (
lenSize = 4
)
// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found.
var ErrInvalidPktLen = errors.New("invalid pkt-len found")
// Scanner provides a convenient interface for reading the payloads of a
// series of pkt-lines. It takes an io.Reader providing the source,
// which then can be tokenized through repeated calls to the Scan
// method.
//
// After each Scan call, the Bytes method will return the payload of the
// corresponding pkt-line on a shared buffer, which will be 65516 bytes
// or smaller. Flush pkt-lines are represented by empty byte slices.
//
// Scanning stops at EOF or the first I/O error.
type Scanner struct {
r io.Reader // The reader provided by the client
err error // Sticky error
payload []byte // Last pkt-payload
len [lenSize]byte // Last pkt-len
}
// NewScanner returns a new Scanner to read from r.
func NewScanner(r io.Reader) *Scanner {
return &Scanner{
r: r,
}
}
// Err returns the first error encountered by the Scanner.
func (s *Scanner) Err() error {
return s.err
}
// Scan advances the Scanner to the next pkt-line, whose payload will
// then be available through the Bytes method. Scanning stops at EOF
// or the first I/O error. After Scan returns false, the Err method
// will return any error that occurred during scanning, except that if
// it was io.EOF, Err will return nil.
func (s *Scanner) Scan() bool {
var l int
l, s.err = s.readPayloadLen()
if s.err == io.EOF {
s.err = nil
return false
}
if s.err != nil {
return false
}
if cap(s.payload) < l {
s.payload = make([]byte, 0, l)
}
if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil {
return false
}
s.payload = s.payload[:l]
return true
}
// Bytes returns the most recent payload generated by a call to Scan.
// The underlying array may point to data that will be overwritten by a
// subsequent call to Scan. It does no allocation.
func (s *Scanner) Bytes() []byte {
return s.payload
}
// readPayloadLen returns the payload length by reading the
// pkt-len and subtracting the pkt-len size.
func (s *Scanner) readPayloadLen() (int, error) {
if _, err := io.ReadFull(s.r, s.len[:]); err != nil {
if err == io.ErrUnexpectedEOF {
return 0, ErrInvalidPktLen
}
return 0, err
}
n, err := hexDecode(s.len)
if err != nil {
return 0, err
}
switch {
case n == 0:
return 0, nil
case n <= lenSize:
return 0, ErrInvalidPktLen
case n > OversizePayloadMax+lenSize:
return 0, ErrInvalidPktLen
default:
return n - lenSize, nil
}
}
// Turns the hexadecimal representation of a number in a byte slice into
// a number. This function is a substitute for strconv.ParseUint(string(buf),
// 16, 16) and/or hex.Decode, avoiding the generation of new strings and
// thus helping the GC.
func hexDecode(buf [lenSize]byte) (int, error) {
var ret int
for i := 0; i < lenSize; i++ {
n, err := asciiHexToByte(buf[i])
if err != nil {
return 0, ErrInvalidPktLen
}
ret = 16*ret + int(n)
}
return ret, nil
}
// turns the hexadecimal ascii representation of a byte into its
// numerical value. Example: from 'b' to 11 (0xb).
func asciiHexToByte(b byte) (byte, error) {
switch {
case b >= '0' && b <= '9':
return b - '0', nil
case b >= 'a' && b <= 'f':
return b - 'a' + 10, nil
default:
return 0, ErrInvalidPktLen
}
}
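// Illustrative sketch (an addition for clarity, not part of the vendored
// source): collecting every pkt-line payload from a stream. Bytes returns a
// shared buffer, so each payload is copied before the next Scan call.
func exampleScanPktLines(r io.Reader) ([]string, error) {
	s := NewScanner(r)
	var payloads []string
	for s.Scan() {
		payloads = append(payloads, string(s.Bytes()))
	}
	return payloads, s.Err()
}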

vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go

@ -0,0 +1,73 @@
package plumbing
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"hash"
"sort"
"strconv"
)
// Hash is SHA1 hashed content
type Hash [20]byte
// ZeroHash is a Hash with value zero
var ZeroHash Hash
// ComputeHash computes the hash for a given ObjectType and content
func ComputeHash(t ObjectType, content []byte) Hash {
h := NewHasher(t, int64(len(content)))
h.Write(content)
return h.Sum()
}
// NewHash returns a new Hash from a hexadecimal hash representation
func NewHash(s string) Hash {
b, _ := hex.DecodeString(s)
var h Hash
copy(h[:], b)
return h
}
func (h Hash) IsZero() bool {
var empty Hash
return h == empty
}
func (h Hash) String() string {
return hex.EncodeToString(h[:])
}
type Hasher struct {
hash.Hash
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))
h.Write([]byte{0})
return h
}
func (h Hasher) Sum() (hash Hash) {
copy(hash[:], h.Hash.Sum(nil))
return
}
// HashesSort sorts a slice of Hashes in increasing order.
func HashesSort(a []Hash) {
sort.Sort(HashSlice(a))
}
// HashSlice attaches the methods of sort.Interface to []Hash, sorting in
// increasing order.
type HashSlice []Hash
func (p HashSlice) Len() int { return len(p) }
func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 }
func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
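// Illustrative sketch (an addition for clarity, not part of the vendored
// source): hashing an empty blob. The result matches the well-known git
// hash of an empty file, e69de29bb2d1d6434b8b29ae775ad8c2e48c5391.
func exampleEmptyBlobHash() string {
	return ComputeHash(BlobObject, nil).String()
}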

vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go

@ -0,0 +1,61 @@
package plumbing
import (
"bytes"
"io"
"io/ioutil"
)
// MemoryObject is an in-memory Object implementation
type MemoryObject struct {
t ObjectType
h Hash
cont []byte
sz int64
}
// Hash returns the object Hash, the hash is calculated on-the-fly the first
// time it's called, in all subsequent calls the same Hash is returned even
// if the type or the content have changed. The Hash is only generated if the
// size of the content is exactly the object size.
func (o *MemoryObject) Hash() Hash {
if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
o.h = ComputeHash(o.t, o.cont)
}
return o.h
}
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize sets the object size; content of the given size should be written
// afterwards
func (o *MemoryObject) SetSize(s int64) { o.sz = s }
// Reader returns an ObjectReader used to read the object's content.
func (o *MemoryObject) Reader() (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil
}
// Writer returns an ObjectWriter used to write the object's content.
func (o *MemoryObject) Writer() (io.WriteCloser, error) {
return o, nil
}
func (o *MemoryObject) Write(p []byte) (n int, err error) {
o.cont = append(o.cont, p...)
o.sz = int64(len(o.cont))
return len(p), nil
}
// Close releases any resources consumed by the object when it is acting as
// an ObjectWriter.
func (o *MemoryObject) Close() error { return nil }
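// Illustrative sketch (an addition for clarity, not part of the vendored
// source): building a blob in memory. Hash is only computed once the
// written content length matches the declared size.
func exampleMemoryBlob(content []byte) (Hash, error) {
	o := &MemoryObject{}
	o.SetType(BlobObject)
	o.SetSize(int64(len(content)))
	if _, err := o.Write(content); err != nil {
		return ZeroHash, err
	}
	return o.Hash(), nil
}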

vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go

@ -0,0 +1,111 @@
// Package plumbing implements the core interfaces and structs used by go-git
package plumbing
import (
"errors"
"io"
)
var (
ErrObjectNotFound = errors.New("object not found")
// ErrInvalidType is returned when an invalid object type is provided.
ErrInvalidType = errors.New("invalid object type")
)
// EncodedObject is a generic representation of any git object
type EncodedObject interface {
Hash() Hash
Type() ObjectType
SetType(ObjectType)
Size() int64
SetSize(int64)
Reader() (io.ReadCloser, error)
Writer() (io.WriteCloser, error)
}
// DeltaObject is an EncodedObject representing a delta.
type DeltaObject interface {
EncodedObject
// BaseHash returns the hash of the object used as base for this delta.
BaseHash() Hash
// ActualHash returns the hash of the object after applying the delta.
ActualHash() Hash
// Size returns the size of the object after applying the delta.
ActualSize() int64
}
// ObjectType is the internal object type.
// Integer values from 0 to 7 map to those exposed by git.
// AnyObject is used to represent any of them.
type ObjectType int8
const (
InvalidObject ObjectType = 0
CommitObject ObjectType = 1
TreeObject ObjectType = 2
BlobObject ObjectType = 3
TagObject ObjectType = 4
// 5 reserved for future expansion
OFSDeltaObject ObjectType = 6
REFDeltaObject ObjectType = 7
AnyObject ObjectType = -127
)
func (t ObjectType) String() string {
switch t {
case CommitObject:
return "commit"
case TreeObject:
return "tree"
case BlobObject:
return "blob"
case TagObject:
return "tag"
case OFSDeltaObject:
return "ofs-delta"
case REFDeltaObject:
return "ref-delta"
case AnyObject:
return "any"
default:
return "unknown"
}
}
func (t ObjectType) Bytes() []byte {
return []byte(t.String())
}
// Valid returns true if t is a valid ObjectType.
func (t ObjectType) Valid() bool {
return t >= CommitObject && t <= REFDeltaObject
}
// IsDelta returns true for any ObjectType that represents a delta (i.e.
// REFDeltaObject or OFSDeltaObject).
func (t ObjectType) IsDelta() bool {
return t == REFDeltaObject || t == OFSDeltaObject
}
// ParseObjectType parses a string representation of ObjectType. It returns an
// error on parse failure.
func ParseObjectType(value string) (typ ObjectType, err error) {
switch value {
case "commit":
typ = CommitObject
case "tree":
typ = TreeObject
case "blob":
typ = BlobObject
case "tag":
typ = TagObject
case "ofs-delta":
typ = OFSDeltaObject
case "ref-delta":
typ = REFDeltaObject
default:
err = ErrInvalidType
}
return
}
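// Illustrative sketch (an addition for clarity, not part of the vendored
// source): ObjectType round-trips through its string form.
func exampleObjectTypeRoundTrip() bool {
	t, err := ParseObjectType("ofs-delta")
	return err == nil && t == OFSDeltaObject && t.String() == "ofs-delta"
}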

vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go

@ -0,0 +1,144 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// Blob is used to store arbitrary data - it is generally a file.
type Blob struct {
// Hash of the blob.
Hash plumbing.Hash
// Size of the (uncompressed) blob.
Size int64
obj plumbing.EncodedObject
}
// GetBlob gets a blob from an object storer and decodes it.
func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
o, err := s.EncodedObject(plumbing.BlobObject, h)
if err != nil {
return nil, err
}
return DecodeBlob(o)
}
// DecodeBlob decodes an encoded object into a *Blob.
func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) {
b := &Blob{}
if err := b.Decode(o); err != nil {
return nil, err
}
return b, nil
}
// ID returns the object ID of the blob. The returned value will always match
// the current value of Blob.Hash.
//
// ID is present to fulfill the Object interface.
func (b *Blob) ID() plumbing.Hash {
return b.Hash
}
// Type returns the type of object. It always returns plumbing.BlobObject.
//
// Type is present to fulfill the Object interface.
func (b *Blob) Type() plumbing.ObjectType {
return plumbing.BlobObject
}
// Decode transforms a plumbing.EncodedObject into a Blob struct.
func (b *Blob) Decode(o plumbing.EncodedObject) error {
if o.Type() != plumbing.BlobObject {
return ErrUnsupportedObject
}
b.Hash = o.Hash()
b.Size = o.Size()
b.obj = o
return nil
}
// Encode transforms a Blob into a plumbing.EncodedObject.
func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.BlobObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
r, err := b.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(r, &err)
_, err = io.Copy(w, r)
return err
}
// Reader returns a reader allowing access to the content of the blob
func (b *Blob) Reader() (io.ReadCloser, error) {
return b.obj.Reader()
}
// BlobIter provides an iterator for a set of blobs.
type BlobIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewBlobIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *BlobIter that iterates over all
// blobs contained in the storer.EncodedObjectIter.
//
// Any non-blob object returned by the storer.EncodedObjectIter is skipped.
func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter {
return &BlobIter{iter, s}
}
// Next moves the iterator to the next blob and returns a pointer to it. If
// there are no more blobs, it returns io.EOF.
func (iter *BlobIter) Next() (*Blob, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
if obj.Type() != plumbing.BlobObject {
continue
}
return DecodeBlob(obj)
}
}
// ForEach calls the cb function for each blob contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *BlobIter) ForEach(cb func(*Blob) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
if obj.Type() != plumbing.BlobObject {
return nil
}
b, err := DecodeBlob(obj)
if err != nil {
return err
}
return cb(b)
})
}
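// Illustrative sketch (an addition for clarity, not part of the vendored
// source): summing the uncompressed sizes of all blobs yielded by a BlobIter.
func exampleTotalBlobSize(iter *BlobIter) (int64, error) {
	var total int64
	err := iter.ForEach(func(b *Blob) error {
		total += b.Size
		return nil
	})
	return total, err
}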


@ -0,0 +1,157 @@
package object
import (
"bytes"
"context"
"fmt"
"strings"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
)
// Change values represent a detected change between two git trees. For
// modifications, From is the original status of the node and To is its
// final status. For insertions, From is the zero value and for
// deletions To is the zero value.
type Change struct {
From ChangeEntry
To ChangeEntry
}
var empty = ChangeEntry{}
// Action returns the kind of action represented by the change, an
// insertion, a deletion or a modification.
func (c *Change) Action() (merkletrie.Action, error) {
if c.From == empty && c.To == empty {
return merkletrie.Action(0),
fmt.Errorf("malformed change: empty from and to")
}
if c.From == empty {
return merkletrie.Insert, nil
}
if c.To == empty {
return merkletrie.Delete, nil
}
return merkletrie.Modify, nil
}
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()
if err != nil {
return
}
if action == merkletrie.Insert || action == merkletrie.Modify {
to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
if !c.To.TreeEntry.Mode.IsFile() {
return nil, nil, nil
}
if err != nil {
return
}
}
if action == merkletrie.Delete || action == merkletrie.Modify {
from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
if !c.From.TreeEntry.Mode.IsFile() {
return nil, nil, nil
}
if err != nil {
return
}
}
return
}
func (c *Change) String() string {
action, err := c.Action()
if err != nil {
return "malformed change"
}
return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())
}
// Patch returns a Patch with all the file changes in chunks. This
// representation can be used to create several diff outputs.
func (c *Change) Patch() (*Patch, error) {
return c.PatchContext(context.Background())
}
// PatchContext returns a Patch with all the file changes in chunks. This
// representation can be used to create several diff outputs.
// If the context expires, a non-nil error will be returned.
// The provided context must be non-nil.
func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
return getPatchContext(ctx, "", c)
}
func (c *Change) name() string {
if c.From != empty {
return c.From.Name
}
return c.To.Name
}
// ChangeEntry values represent a node that has suffered a change.
type ChangeEntry struct {
// Full path of the node using "/" as separator.
Name string
// Parent tree of the node that has changed.
Tree *Tree
// The entry of the node.
TreeEntry TreeEntry
}
// Changes represents a collection of changes between two git trees.
// Implements sort.Interface lexicographically over the path of the
// changed files.
type Changes []*Change
func (c Changes) Len() int {
return len(c)
}
func (c Changes) Swap(i, j int) {
c[i], c[j] = c[j], c[i]
}
func (c Changes) Less(i, j int) bool {
return strings.Compare(c[i].name(), c[j].name()) < 0
}
func (c Changes) String() string {
var buffer bytes.Buffer
buffer.WriteString("[")
comma := ""
for _, v := range c {
buffer.WriteString(comma)
buffer.WriteString(v.String())
comma = ", "
}
buffer.WriteString("]")
return buffer.String()
}
// Patch returns a Patch with all the changes in chunks. This
// representation can be used to create several diff outputs.
func (c Changes) Patch() (*Patch, error) {
return c.PatchContext(context.Background())
}
// PatchContext returns a Patch with all the changes in chunks. This
// representation can be used to create several diff outputs.
// If the context expires, a non-nil error will be returned.
// The provided context must be non-nil.
func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
return getPatchContext(ctx, "", c...)
}


@ -0,0 +1,61 @@
package object
import (
"errors"
"fmt"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// The following functions transform change types from the merkletrie
// package to change types from this package.
func newChange(c merkletrie.Change) (*Change, error) {
ret := &Change{}
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
}
return ret, nil
}
func newChangeEntry(p noder.Path) (ChangeEntry, error) {
if p == nil {
return empty, nil
}
asTreeNoder, ok := p.Last().(*treeNoder)
if !ok {
return ChangeEntry{}, errors.New("cannot transform non-TreeNoders")
}
return ChangeEntry{
Name: p.String(),
Tree: asTreeNoder.parent,
TreeEntry: TreeEntry{
Name: asTreeNoder.name,
Mode: asTreeNoder.mode,
Hash: asTreeNoder.hash,
},
}, nil
}
func newChanges(src merkletrie.Changes) (Changes, error) {
ret := make(Changes, len(src))
var err error
for i, e := range src {
ret[i], err = newChange(e)
if err != nil {
return nil, fmt.Errorf("change #%d: %s", i, err)
}
}
return ret, nil
}


@ -0,0 +1,415 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
beginpgp string = "-----BEGIN PGP SIGNATURE-----"
endpgp string = "-----END PGP SIGNATURE-----"
headerpgp string = "gpgsig"
)
// Hash represents the hash of an object
type Hash plumbing.Hash
// Commit points to a single tree, marking it as what the project looked like
// at a certain point in time. It contains meta-information about that point
// in time, such as a timestamp, the author of the changes since the last
// commit, a pointer to the previous commit(s), etc.
// http://shafiulazam.com/gitbook/1_the_git_object_model.html
type Commit struct {
// Hash of the commit object.
Hash plumbing.Hash
// Author is the original author of the commit.
Author Signature
// Committer is the one performing the commit, might be different from
// Author.
Committer Signature
// PGPSignature is the PGP signature of the commit.
PGPSignature string
// Message is the commit message, contains arbitrary text.
Message string
// TreeHash is the hash of the root tree of the commit.
TreeHash plumbing.Hash
// ParentHashes are the hashes of the parent commits of the commit.
ParentHashes []plumbing.Hash
s storer.EncodedObjectStorer
}
// GetCommit gets a commit from an object storer and decodes it.
func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) {
o, err := s.EncodedObject(plumbing.CommitObject, h)
if err != nil {
return nil, err
}
return DecodeCommit(s, o)
}
// DecodeCommit decodes an encoded object into a *Commit and associates it to
// the given object storer.
func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) {
c := &Commit{s: s}
if err := c.Decode(o); err != nil {
return nil, err
}
return c, nil
}
// Tree returns the Tree from the commit.
func (c *Commit) Tree() (*Tree, error) {
return GetTree(c.s, c.TreeHash)
}
// PatchContext returns the Patch between the actual commit and the provided
// one. An error will be returned if the context expires. The provided context
// must be non-nil.
func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
fromTree, err := c.Tree()
if err != nil {
return nil, err
}
toTree, err := to.Tree()
if err != nil {
return nil, err
}
return fromTree.PatchContext(ctx, toTree)
}
// Patch returns the Patch between the actual commit and the provided one.
func (c *Commit) Patch(to *Commit) (*Patch, error) {
return c.PatchContext(context.Background(), to)
}
// Parents returns a CommitIter to the parent Commits.
func (c *Commit) Parents() CommitIter {
return NewCommitIter(c.s,
storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes),
)
}
// NumParents returns the number of parents in a commit.
func (c *Commit) NumParents() int {
return len(c.ParentHashes)
}
var ErrParentNotFound = errors.New("commit parent not found")
// Parent returns the ith parent of a commit.
func (c *Commit) Parent(i int) (*Commit, error) {
if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 {
return nil, ErrParentNotFound
}
return GetCommit(c.s, c.ParentHashes[i])
}
// File returns the file with the specified "path" in the commit and a
// nil error if the file exists. If the file does not exist, it returns
// a nil file and the ErrFileNotFound error.
func (c *Commit) File(path string) (*File, error) {
tree, err := c.Tree()
if err != nil {
return nil, err
}
return tree.File(path)
}
// Files returns a FileIter allowing iteration over the files in the Tree
func (c *Commit) Files() (*FileIter, error) {
tree, err := c.Tree()
if err != nil {
return nil, err
}
return tree.Files(), nil
}
// ID returns the object ID of the commit. The returned value will always match
// the current value of Commit.Hash.
//
// ID is present to fulfill the Object interface.
func (c *Commit) ID() plumbing.Hash {
return c.Hash
}
// Type returns the type of object. It always returns plumbing.CommitObject.
//
// Type is present to fulfill the Object interface.
func (c *Commit) Type() plumbing.ObjectType {
return plumbing.CommitObject
}
// Decode transforms a plumbing.EncodedObject into a Commit struct.
func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.CommitObject {
return ErrUnsupportedObject
}
c.Hash = o.Hash()
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
var message bool
var pgpsig bool
for {
line, err := r.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
if pgpsig {
if len(line) > 0 && line[0] == ' ' {
line = bytes.TrimLeft(line, " ")
c.PGPSignature += string(line)
continue
} else {
pgpsig = false
}
}
if !message {
line = bytes.TrimSpace(line)
if len(line) == 0 {
message = true
continue
}
split := bytes.SplitN(line, []byte{' '}, 2)
var data []byte
if len(split) == 2 {
data = split[1]
}
switch string(split[0]) {
case "tree":
c.TreeHash = plumbing.NewHash(string(data))
case "parent":
c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
case "author":
c.Author.Decode(data)
case "committer":
c.Committer.Decode(data)
case headerpgp:
c.PGPSignature += string(data) + "\n"
pgpsig = true
}
} else {
c.Message += string(line)
}
if err == io.EOF {
return nil
}
}
}
// Encode transforms a Commit into a plumbing.EncodedObject.
func (b *Commit) Encode(o plumbing.EncodedObject) error {
return b.encode(o, true)
}
func (b *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.CommitObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
if _, err = fmt.Fprintf(w, "tree %s\n", b.TreeHash.String()); err != nil {
return err
}
for _, parent := range b.ParentHashes {
if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil {
return err
}
}
if _, err = fmt.Fprint(w, "author "); err != nil {
return err
}
if err = b.Author.Encode(w); err != nil {
return err
}
if _, err = fmt.Fprint(w, "\ncommitter "); err != nil {
return err
}
if err = b.Committer.Encode(w); err != nil {
return err
}
if b.PGPSignature != "" && includeSig {
if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
return err
}
// Split all the signature lines and re-write with a left padding and
// newline. Use join for this so it's clear that a newline should not be
// added after this section, as it will be added when the message is
// printed.
signature := strings.TrimSuffix(b.PGPSignature, "\n")
lines := strings.Split(signature, "\n")
if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
return err
}
}
if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil {
return err
}
return err
}
// Stats returns the FileStats of the commit.
func (c *Commit) Stats() (FileStats, error) {
// Get the previous commit.
ci := c.Parents()
parentCommit, err := ci.Next()
if err != nil {
if err == io.EOF {
emptyNoder := treeNoder{}
parentCommit = &Commit{
Hash: emptyNoder.hash,
// TreeHash: emptyNoder.parent.Hash,
s: c.s,
}
} else {
return nil, err
}
}
patch, err := parentCommit.Patch(c)
if err != nil {
return nil, err
}
return getFileStatsFromFilePatches(patch.FilePatches()), nil
}
func (c *Commit) String() string {
return fmt.Sprintf(
"%s %s\nAuthor: %s\nDate: %s\n\n%s\n",
plumbing.CommitObject, c.Hash, c.Author.String(),
c.Author.When.Format(DateFormat), indent(c.Message),
)
}
// Verify performs PGP verification of the commit with a provided armored
// keyring and returns openpgp.Entity associated with verifying key on success.
func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
keyRingReader := strings.NewReader(armoredKeyRing)
keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
if err != nil {
return nil, err
}
// Extract signature.
signature := strings.NewReader(c.PGPSignature)
encoded := &plumbing.MemoryObject{}
// Encode commit components, excluding signature and get a reader object.
if err := c.encode(encoded, false); err != nil {
return nil, err
}
er, err := encoded.Reader()
if err != nil {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
}
func indent(t string) string {
var output []string
for _, line := range strings.Split(t, "\n") {
if len(line) != 0 {
line = " " + line
}
output = append(output, line)
}
return strings.Join(output, "\n")
}
// CommitIter is a generic closable interface for iterating over commits.
type CommitIter interface {
Next() (*Commit, error)
ForEach(func(*Commit) error) error
Close()
}
// storerCommitIter provides an iterator from commits in an EncodedObjectStorer.
type storerCommitIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewCommitIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a CommitIter that iterates over all
// commits contained in the storer.EncodedObjectIter.
//
// Any non-commit object returned by the storer.EncodedObjectIter is skipped.
func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter {
return &storerCommitIter{iter, s}
}
// Next moves the iterator to the next commit and returns a pointer to it. If
// there are no more commits, it returns io.EOF.
func (iter *storerCommitIter) Next() (*Commit, error) {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
return DecodeCommit(iter.s, obj)
}
// ForEach calls the cb function for each commit contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
c, err := DecodeCommit(iter.s, obj)
if err != nil {
return err
}
return cb(c)
})
}
func (iter *storerCommitIter) Close() {
iter.EncodedObjectIter.Close()
}


@ -0,0 +1,327 @@
package object
import (
"container/list"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage"
)
type commitPreIterator struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
stack []CommitIter
start *Commit
}
// NewCommitPreorderIter returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents in pre-order. Each
// commit is visited only once. Errors might be returned if the history
// cannot be traversed (e.g. missing objects). Ignore allows skipping some
// commits from being iterated; seenExternal marks commits to be treated as
// already seen.
func NewCommitPreorderIter(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &commitPreIterator{
seenExternal: seenExternal,
seen: seen,
stack: make([]CommitIter, 0),
start: c,
}
}
func (w *commitPreIterator) Next() (*Commit, error) {
var c *Commit
for {
if w.start != nil {
c = w.start
w.start = nil
} else {
current := len(w.stack) - 1
if current < 0 {
return nil, io.EOF
}
var err error
c, err = w.stack[current].Next()
if err == io.EOF {
w.stack = w.stack[:current]
continue
}
if err != nil {
return nil, err
}
}
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
if c.NumParents() > 0 {
w.stack = append(w.stack, filteredParentIter(c, w.seen))
}
return c, nil
}
}
func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter {
var hashes []plumbing.Hash
for _, h := range c.ParentHashes {
if !seen[h] {
hashes = append(hashes, h)
}
}
return NewCommitIter(c.s,
storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes),
)
}
func (w *commitPreIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitPreIterator) Close() {}
type commitPostIterator struct {
stack []*Commit
seen map[plumbing.Hash]bool
}
// NewCommitPostorderIter returns a CommitIter that walks the commit
// history like WalkCommitHistory but in post-order. This means that after
// walking a merge commit, the merged commit will be walked before the base
// it was merged on. This can be useful if you wish to see the history in
// chronological order. Ignore allows to skip some commits from being iterated.
func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &commitPostIterator{
stack: []*Commit{c},
seen: seen,
}
}
func (w *commitPostIterator) Next() (*Commit, error) {
for {
if len(w.stack) == 0 {
return nil, io.EOF
}
c := w.stack[len(w.stack)-1]
w.stack = w.stack[:len(w.stack)-1]
if w.seen[c.Hash] {
continue
}
w.seen[c.Hash] = true
return c, c.Parents().ForEach(func(p *Commit) error {
w.stack = append(w.stack, p)
return nil
})
}
}
func (w *commitPostIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitPostIterator) Close() {}
// commitAllIterator is a commit iterator over all refs.
type commitAllIterator struct {
// currCommit points to the current commit.
currCommit *list.Element
}
// NewCommitAllIter returns a new commit iterator for all refs.
// repoStorer is a repo Storer used to get commits and references.
// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order
func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
commitsPath := list.New()
commitsLookup := make(map[plumbing.Hash]*list.Element)
head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
if err == nil {
err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
}
if err != nil && err != plumbing.ErrReferenceNotFound {
return nil, err
}
// add all references along with the HEAD
refIter, err := repoStorer.IterReferences()
if err != nil {
return nil, err
}
defer refIter.Close()
for {
ref, err := refIter.Next()
if err == io.EOF {
break
}
if err == plumbing.ErrReferenceNotFound {
continue
}
if err != nil {
return nil, err
}
if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
return nil, err
}
}
return &commitAllIterator{commitsPath.Front()}, nil
}
func addReference(
repoStorer storage.Storer,
commitIterFunc func(*Commit) CommitIter,
ref *plumbing.Reference,
commitsPath *list.List,
commitsLookup map[plumbing.Hash]*list.Element) error {
_, exists := commitsLookup[ref.Hash()]
if exists {
// we already have it - skip the reference.
return nil
}
refCommit, _ := GetCommit(repoStorer, ref.Hash())
if refCommit == nil {
// if it's not a commit - skip it.
return nil
}
var (
refCommits []*Commit
parent *list.Element
)
// collect all ref commits to add
commitIter := commitIterFunc(refCommit)
for c, e := commitIter.Next(); e == nil; {
parent, exists = commitsLookup[c.Hash]
if exists {
break
}
refCommits = append(refCommits, c)
c, e = commitIter.Next()
}
commitIter.Close()
if parent == nil {
// common parent - not found
// add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
for _, c := range refCommits {
parent = commitsPath.PushBack(c)
commitsLookup[c.Hash] = parent
}
} else {
// add ref's commits to the path in reverse order (from the latest)
for i := len(refCommits) - 1; i >= 0; i-- {
c := refCommits[i]
// insert before found common parent
parent = commitsPath.InsertBefore(c, parent)
commitsLookup[c.Hash] = parent
}
}
return nil
}
func (it *commitAllIterator) Next() (*Commit, error) {
if it.currCommit == nil {
return nil, io.EOF
}
c := it.currCommit.Value.(*Commit)
it.currCommit = it.currCommit.Next()
return c, nil
}
func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := it.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (it *commitAllIterator) Close() {
it.currCommit = nil
}


@ -0,0 +1,100 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type bfsCommitIterator struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
queue []*Commit
}
// NewCommitIterBSF returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents in breadth-first
// order. Each commit is visited only once. Errors might be returned if the
// history cannot be traversed (e.g. missing objects). Ignore allows skipping
// some commits from being iterated; seenExternal marks commits to be treated
// as already seen.
func NewCommitIterBSF(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
return &bfsCommitIterator{
seenExternal: seenExternal,
seen: seen,
queue: []*Commit{c},
}
}
func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error {
if w.seen[h] || w.seenExternal[h] {
return nil
}
c, err := GetCommit(store, h)
if err != nil {
return err
}
w.queue = append(w.queue, c)
return nil
}
func (w *bfsCommitIterator) Next() (*Commit, error) {
var c *Commit
for {
if len(w.queue) == 0 {
return nil, io.EOF
}
c = w.queue[0]
w.queue = w.queue[1:]
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
for _, h := range c.ParentHashes {
err := w.appendHash(c.s, h)
if err != nil {
return nil, err
}
}
return c, nil
}
}
func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *bfsCommitIterator) Close() {}


@ -0,0 +1,103 @@
package object
import (
"io"
"github.com/emirpasic/gods/trees/binaryheap"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type commitIteratorByCTime struct {
seenExternal map[plumbing.Hash]bool
seen map[plumbing.Hash]bool
heap *binaryheap.Heap
}
// NewCommitIterCTime returns a CommitIter that walks the commit history,
// starting at the given commit and visiting its parents while preserving
// Committer Time order; this appears to be the closest order to `git log`.
// Each commit is visited only once. Errors might be returned if the history
// cannot be traversed (e.g. missing objects). Ignore allows skipping some
// commits from being iterated; seenExternal marks commits to be treated as
// already seen.
func NewCommitIterCTime(
c *Commit,
seenExternal map[plumbing.Hash]bool,
ignore []plumbing.Hash,
) CommitIter {
seen := make(map[plumbing.Hash]bool)
for _, h := range ignore {
seen[h] = true
}
heap := binaryheap.NewWith(func(a, b interface{}) int {
if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) {
return 1
}
return -1
})
heap.Push(c)
return &commitIteratorByCTime{
seenExternal: seenExternal,
seen: seen,
heap: heap,
}
}
func (w *commitIteratorByCTime) Next() (*Commit, error) {
var c *Commit
for {
cIn, ok := w.heap.Pop()
if !ok {
return nil, io.EOF
}
c = cIn.(*Commit)
if w.seen[c.Hash] || w.seenExternal[c.Hash] {
continue
}
w.seen[c.Hash] = true
for _, h := range c.ParentHashes {
if w.seen[h] || w.seenExternal[h] {
continue
}
pc, err := GetCommit(c.s, h)
if err != nil {
return nil, err
}
w.heap.Push(pc)
}
return c, nil
}
}
func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error {
for {
c, err := w.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
err = cb(c)
if err == storer.ErrStop {
break
}
if err != nil {
return err
}
}
return nil
}
func (w *commitIteratorByCTime) Close() {}


@ -0,0 +1,145 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
type commitFileIter struct {
fileName string
sourceIter CommitIter
currentCommit *Commit
checkParent bool
}
// NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
// successive trees returned from the commit iterator from the argument. The purpose of this is
// to find the commits that explain how the files that match the path came to be.
// If checkParent is true then the function double-checks whether the potential parent (the next commit in the path)
// is one of the parents in the tree (it's used by `git log --all`).
func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
iterator := new(commitFileIter)
iterator.sourceIter = commitIter
iterator.fileName = fileName
iterator.checkParent = checkParent
return iterator
}
func (c *commitFileIter) Next() (*Commit, error) {
if c.currentCommit == nil {
var err error
c.currentCommit, err = c.sourceIter.Next()
if err != nil {
return nil, err
}
}
commit, commitErr := c.getNextFileCommit()
// Setting current-commit to nil to prevent unwanted states when errors are raised
if commitErr != nil {
c.currentCommit = nil
}
return commit, commitErr
}
func (c *commitFileIter) getNextFileCommit() (*Commit, error) {
for {
// Parent-commit can be nil if the current-commit is the initial commit
parentCommit, parentCommitErr := c.sourceIter.Next()
if parentCommitErr != nil {
// If the parent-commit is beyond the initial commit, keep it nil
if parentCommitErr != io.EOF {
return nil, parentCommitErr
}
parentCommit = nil
}
// Fetch the trees of the current and parent commits
currentTree, currTreeErr := c.currentCommit.Tree()
if currTreeErr != nil {
return nil, currTreeErr
}
var parentTree *Tree
if parentCommit != nil {
var parentTreeErr error
parentTree, parentTreeErr = parentCommit.Tree()
if parentTreeErr != nil {
return nil, parentTreeErr
}
}
// Find diff between current and parent trees
changes, diffErr := DiffTree(currentTree, parentTree)
if diffErr != nil {
return nil, diffErr
}
found := c.hasFileChange(changes, parentCommit)
// Store the current-commit in case a change is found, and
// update the current-commit for the next iteration
prevCommit := c.currentCommit
c.currentCommit = parentCommit
if found {
return prevCommit, nil
}
// If no match is found and the parent-commit is beyond the initial commit, then return with EOF
if parentCommit == nil {
return nil, io.EOF
}
}
}
func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
for _, change := range changes {
if change.name() != c.fileName {
continue
}
// filename matches, now check if source iterator contains all commits (from all refs)
if c.checkParent {
if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
return true
}
continue
}
return true
}
return false
}
func isParentHash(hash plumbing.Hash, commit *Commit) bool {
for _, h := range commit.ParentHashes {
if h == hash {
return true
}
}
return false
}
func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
for {
commit, nextErr := c.Next()
if nextErr != nil {
return nextErr
}
err := cb(commit)
if err == storer.ErrStop {
return nil
} else if err != nil {
return err
}
}
}
func (c *commitFileIter) Close() {
c.sourceIter.Close()
}


@ -0,0 +1,37 @@
package object
import (
"bytes"
"context"
"gopkg.in/src-d/go-git.v4/utils/merkletrie"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// DiffTree compares the content and mode of the blobs found via two
// tree objects.
func DiffTree(a, b *Tree) (Changes, error) {
return DiffTreeContext(context.Background(), a, b)
}
// DiffTreeContext compares the content and mode of the blobs found via two
// tree objects. The provided context must be non-nil.
// An error will be returned if the context expires.
func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
from := NewTreeRootNode(a)
to := NewTreeRootNode(b)
hashEqual := func(a, b noder.Hasher) bool {
return bytes.Equal(a.Hash(), b.Hash())
}
merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
if err != nil {
if err == merkletrie.ErrCanceled {
return nil, ErrCanceled
}
return nil, err
}
return newChanges(merkletrieChanges)
}
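// Illustrative sketch (an addition for clarity, not part of the vendored
// source): collecting the paths touched between two trees.
func exampleChangedPaths(a, b *Tree) ([]string, error) {
	changes, err := DiffTree(a, b)
	if err != nil {
		return nil, err
	}
	var paths []string
	for _, c := range changes {
		paths = append(paths, c.name())
	}
	return paths, nil
}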

vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go

@ -0,0 +1,137 @@
package object
import (
"bytes"
"io"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/binary"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// File represents git file objects.
type File struct {
// Name is the path of the file. It might be relative to a tree,
// depending on the function that generates it.
Name string
// Mode is the file mode.
Mode filemode.FileMode
// Blob with the contents of the file.
Blob
}
// NewFile returns a File based on the given blob object
func NewFile(name string, m filemode.FileMode, b *Blob) *File {
return &File{Name: name, Mode: m, Blob: *b}
}
// Contents returns the contents of a file as a string.
func (f *File) Contents() (content string, err error) {
reader, err := f.Reader()
if err != nil {
return "", err
}
defer ioutil.CheckClose(reader, &err)
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(reader); err != nil {
return "", err
}
return buf.String(), nil
}
// IsBinary returns whether the file is binary or not
func (f *File) IsBinary() (bin bool, err error) {
reader, err := f.Reader()
if err != nil {
return false, err
}
defer ioutil.CheckClose(reader, &err)
return binary.IsBinary(reader)
}
// Lines returns a slice of lines from the contents of a file, stripping
// all end of line characters. If the last line is empty (does not end
// in an end of line), it is also stripped.
func (f *File) Lines() ([]string, error) {
content, err := f.Contents()
if err != nil {
return nil, err
}
splits := strings.Split(content, "\n")
// remove the last line if it is empty
if splits[len(splits)-1] == "" {
return splits[:len(splits)-1], nil
}
return splits, nil
}
// FileIter provides an iterator for the files in a tree.
type FileIter struct {
s storer.EncodedObjectStorer
w TreeWalker
}
// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a
// *FileIter that iterates over all files contained in the tree, recursively.
func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter {
return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)}
}
// Next moves the iterator to the next file and returns a pointer to it. If
// there are no more files, it returns io.EOF.
func (iter *FileIter) Next() (*File, error) {
for {
name, entry, err := iter.w.Next()
if err != nil {
return nil, err
}
if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
continue
}
blob, err := GetBlob(iter.s, entry.Hash)
if err != nil {
return nil, err
}
return NewFile(name, entry.Mode, blob), nil
}
}
// ForEach calls the cb function for each file contained in this iter until
// an error happens or the end of the iter is reached. If storer.ErrStop is sent
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *FileIter) ForEach(cb func(*File) error) error {
defer iter.Close()
for {
f, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := cb(f); err != nil {
if err == storer.ErrStop {
return nil
}
return err
}
}
}
func (iter *FileIter) Close() {
iter.w.Close()
}
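// Illustrative sketch (an addition for clarity, not part of the vendored
// source): counting the lines of every file reachable from a FileIter.
func exampleCountLines(iter *FileIter) (int, error) {
	total := 0
	err := iter.ForEach(func(f *File) error {
		lines, err := f.Lines()
		if err != nil {
			return err
		}
		total += len(lines)
		return nil
	})
	return total, err
}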


@ -0,0 +1,237 @@
// Package object contains implementations of all Git objects and utility
// functions to work with them.
package object
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
// ErrUnsupportedObject is returned when an unsupported object is being decoded.
var ErrUnsupportedObject = errors.New("unsupported object type")
// Object is a generic representation of any git object. It is implemented by
// Commit, Tree, Blob, and Tag, and includes the functions that are common to
// them.
//
// Object is returned when an object can be of any type. It is frequently used
// with a type switch to acquire the specific type of object:
//
// func process(obj Object) {
// switch o := obj.(type) {
// case *Commit:
// // o is a Commit
// case *Tree:
// // o is a Tree
// case *Blob:
// // o is a Blob
// case *Tag:
// // o is a Tag
// }
// }
//
// This interface is intentionally different from plumbing.EncodedObject, which
// is a lower level interface used by storage implementations to read and write
// objects in its encoded form.
type Object interface {
ID() plumbing.Hash
Type() plumbing.ObjectType
Decode(plumbing.EncodedObject) error
Encode(plumbing.EncodedObject) error
}
// GetObject gets an object from an object storer and decodes it.
func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) {
o, err := s.EncodedObject(plumbing.AnyObject, h)
if err != nil {
return nil, err
}
return DecodeObject(s, o)
}
// DecodeObject decodes an encoded object into an Object and associates it to
// the given object storer.
func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) {
switch o.Type() {
case plumbing.CommitObject:
return DecodeCommit(s, o)
case plumbing.TreeObject:
return DecodeTree(s, o)
case plumbing.BlobObject:
return DecodeBlob(o)
case plumbing.TagObject:
return DecodeTag(s, o)
default:
return nil, plumbing.ErrInvalidType
}
}
// DateFormat is the format being used in the original git implementation
const DateFormat = "Mon Jan 02 15:04:05 2006 -0700"
// Signature is used to identify who created a commit or tag, and when.
type Signature struct {
// Name represents a person name. It is an arbitrary string.
Name string
// Email is an email, but it cannot be assumed to be well-formed.
Email string
// When is the timestamp of the signature.
When time.Time
}
// Decode decodes a byte slice into a signature
func (s *Signature) Decode(b []byte) {
open := bytes.LastIndexByte(b, '<')
close := bytes.LastIndexByte(b, '>')
if open == -1 || close == -1 {
return
}
if close < open {
return
}
s.Name = string(bytes.Trim(b[:open], " "))
s.Email = string(b[open+1 : close])
hasTime := close+2 < len(b)
if hasTime {
s.decodeTimeAndTimeZone(b[close+2:])
}
}
// Encode encodes a Signature into a writer.
func (s *Signature) Encode(w io.Writer) error {
if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil {
return err
}
if err := s.encodeTimeAndTimeZone(w); err != nil {
return err
}
return nil
}
var timeZoneLength = 5
func (s *Signature) decodeTimeAndTimeZone(b []byte) {
space := bytes.IndexByte(b, ' ')
if space == -1 {
space = len(b)
}
ts, err := strconv.ParseInt(string(b[:space]), 10, 64)
if err != nil {
return
}
s.When = time.Unix(ts, 0).In(time.UTC)
var tzStart = space + 1
if tzStart >= len(b) || tzStart+timeZoneLength > len(b) {
return
}
// Include a dummy year in this time.Parse() call to avoid a bug in Go:
// https://github.com/golang/go/issues/19750
//
// Parsing the timezone with no other details causes the tl.Location() call
// below to return time.Local instead of the parsed zone in some cases
tl, err := time.Parse("2006 -0700", "1970 "+string(b[tzStart:tzStart+timeZoneLength]))
if err != nil {
return
}
s.When = s.When.In(tl.Location())
}
func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
u := s.When.Unix()
if u < 0 {
u = 0
}
_, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
return err
}
func (s *Signature) String() string {
return fmt.Sprintf("%s <%s>", s.Name, s.Email)
}
// ObjectIter provides an iterator for a set of objects.
type ObjectIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewObjectIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all
// objects contained in the storer.EncodedObjectIter.
func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter {
return &ObjectIter{iter, s}
}
// Next moves the iterator to the next object and returns a pointer to it. If
// there are no more objects, it returns io.EOF.
func (iter *ObjectIter) Next() (Object, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
o, err := iter.toObject(obj)
if err == plumbing.ErrInvalidType {
continue
}
if err != nil {
return nil, err
}
return o, nil
}
}
// ForEach calls the cb function for each object contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped and no error is returned. The iterator is closed.
func (iter *ObjectIter) ForEach(cb func(Object) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
o, err := iter.toObject(obj)
if err == plumbing.ErrInvalidType {
return nil
}
if err != nil {
return err
}
return cb(o)
})
}
func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) {
switch obj.Type() {
case plumbing.BlobObject:
blob := &Blob{}
return blob, blob.Decode(obj)
case plumbing.TreeObject:
tree := &Tree{s: iter.s}
return tree, tree.Decode(obj)
case plumbing.CommitObject:
commit := &Commit{}
return commit, commit.Decode(obj)
case plumbing.TagObject:
tag := &Tag{}
return tag, tag.Decode(obj)
default:
return nil, plumbing.ErrInvalidType
}
}
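To make the Signature wire format above concrete (name, then <email>, then Unix timestamp and timezone offset), here is a small decoding sketch; the payload is a made-up example of what follows an "author" or "tagger" header key:

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func decodeSignature() {
	// Raw payload as found after "tagger " in a tag object (made-up values).
	raw := []byte("Jane Doe <jane@example.com> 1551318429 +1100")

	var sig object.Signature
	sig.Decode(raw)

	fmt.Println(sig.Name, sig.Email) // Jane Doe jane@example.com
	fmt.Println(sig.When.Unix())     // 1551318429
}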

View File

@ -0,0 +1,335 @@
package object
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
fdiff "gopkg.in/src-d/go-git.v4/plumbing/format/diff"
"gopkg.in/src-d/go-git.v4/utils/diff"
dmp "github.com/sergi/go-diff/diffmatchpatch"
)
var (
ErrCanceled = errors.New("operation canceled")
)
func getPatch(message string, changes ...*Change) (*Patch, error) {
ctx := context.Background()
return getPatchContext(ctx, message, changes...)
}
func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
var filePatches []fdiff.FilePatch
for _, c := range changes {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
fp, err := filePatchWithContext(ctx, c)
if err != nil {
return nil, err
}
filePatches = append(filePatches, fp)
}
return &Patch{message, filePatches}, nil
}
func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
from, to, err := c.Files()
if err != nil {
return nil, err
}
fromContent, fIsBinary, err := fileContent(from)
if err != nil {
return nil, err
}
toContent, tIsBinary, err := fileContent(to)
if err != nil {
return nil, err
}
if fIsBinary || tIsBinary {
return &textFilePatch{from: c.From, to: c.To}, nil
}
diffs := diff.Do(fromContent, toContent)
var chunks []fdiff.Chunk
for _, d := range diffs {
select {
case <-ctx.Done():
return nil, ErrCanceled
default:
}
var op fdiff.Operation
switch d.Type {
case dmp.DiffEqual:
op = fdiff.Equal
case dmp.DiffDelete:
op = fdiff.Delete
case dmp.DiffInsert:
op = fdiff.Add
}
chunks = append(chunks, &textChunk{d.Text, op})
}
return &textFilePatch{
chunks: chunks,
from: c.From,
to: c.To,
}, nil
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return
}
isBinary, err = f.IsBinary()
if err != nil || isBinary {
return
}
content, err = f.Contents()
return
}
// Patch is an implementation of the fdiff.Patch interface
type Patch struct {
message string
filePatches []fdiff.FilePatch
}
func (t *Patch) FilePatches() []fdiff.FilePatch {
return t.filePatches
}
func (t *Patch) Message() string {
return t.message
}
func (p *Patch) Encode(w io.Writer) error {
ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
return ue.Encode(p)
}
func (p *Patch) Stats() FileStats {
return getFileStatsFromFilePatches(p.FilePatches())
}
func (p *Patch) String() string {
buf := bytes.NewBuffer(nil)
err := p.Encode(buf)
if err != nil {
return fmt.Sprintf("malformed patch: %s", err.Error())
}
return buf.String()
}
// changeEntryWrapper is an implementation of fdiff.File interface
type changeEntryWrapper struct {
ce ChangeEntry
}
func (f *changeEntryWrapper) Hash() plumbing.Hash {
if !f.ce.TreeEntry.Mode.IsFile() {
return plumbing.ZeroHash
}
return f.ce.TreeEntry.Hash
}
func (f *changeEntryWrapper) Mode() filemode.FileMode {
return f.ce.TreeEntry.Mode
}
func (f *changeEntryWrapper) Path() string {
if !f.ce.TreeEntry.Mode.IsFile() {
return ""
}
return f.ce.Name
}
func (f *changeEntryWrapper) Empty() bool {
return !f.ce.TreeEntry.Mode.IsFile()
}
// textFilePatch is an implementation of fdiff.FilePatch interface
type textFilePatch struct {
chunks []fdiff.Chunk
from, to ChangeEntry
}
func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
f := &changeEntryWrapper{tf.from}
t := &changeEntryWrapper{tf.to}
if !f.Empty() {
from = f
}
if !t.Empty() {
to = t
}
return
}
func (t *textFilePatch) IsBinary() bool {
return len(t.chunks) == 0
}
func (t *textFilePatch) Chunks() []fdiff.Chunk {
return t.chunks
}
// textChunk is an implementation of fdiff.Chunk interface
type textChunk struct {
content string
op fdiff.Operation
}
func (t *textChunk) Content() string {
return t.content
}
func (t *textChunk) Type() fdiff.Operation {
return t.op
}
// FileStat stores the status of changes in content of a file.
type FileStat struct {
Name string
Addition int
Deletion int
}
func (fs FileStat) String() string {
return printStat([]FileStat{fs})
}
// FileStats is a collection of FileStat.
type FileStats []FileStat
func (fileStats FileStats) String() string {
return printStat(fileStats)
}
func printStat(fileStats []FileStat) string {
padLength := float64(len(" "))
newlineLength := float64(len("\n"))
separatorLength := float64(len("|"))
// Soft line length limit. The text length calculation below excludes
// length of the change number. Adding that would take it closer to 80,
// but probably not more than 80, until it's a huge number.
lineLength := 72.0
// Get the longest filename and longest total change.
var longestLength float64
var longestTotalChange float64
for _, fs := range fileStats {
if int(longestLength) < len(fs.Name) {
longestLength = float64(len(fs.Name))
}
totalChange := fs.Addition + fs.Deletion
if int(longestTotalChange) < totalChange {
longestTotalChange = float64(totalChange)
}
}
// Parts of the output:
// <pad><filename><pad>|<pad><changeNumber><pad><+++/---><newline>
// example: " main.go | 10 +++++++--- "
// <pad><filename><pad>
leftTextLength := padLength + longestLength + padLength
// <pad><number><pad><+++++/-----><newline>
// Excluding number length here.
rightTextLength := padLength + padLength + newlineLength
totalTextArea := leftTextLength + separatorLength + rightTextLength
heightOfHistogram := lineLength - totalTextArea
// Scale the histogram.
var scaleFactor float64
if longestTotalChange > heightOfHistogram {
// Scale down to heightOfHistogram.
scaleFactor = longestTotalChange / heightOfHistogram
} else {
scaleFactor = 1.0
}
finalOutput := ""
for _, fs := range fileStats {
addn := float64(fs.Addition)
deln := float64(fs.Deletion)
adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
}
return finalOutput
}
func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
var fileStats FileStats
for _, fp := range filePatches {
// ignore empty patches (binary files, submodule refs updates)
if len(fp.Chunks()) == 0 {
continue
}
cs := FileStat{}
from, to := fp.Files()
if from == nil {
// New File is created.
cs.Name = to.Path()
} else if to == nil {
// File is deleted.
cs.Name = from.Path()
} else if from.Path() != to.Path() {
// File is renamed. Not supported.
// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
} else {
cs.Name = from.Path()
}
for _, chunk := range fp.Chunks() {
switch chunk.Type() {
case fdiff.Add:
cs.Addition += strings.Count(chunk.Content(), "\n")
case fdiff.Delete:
cs.Deletion += strings.Count(chunk.Content(), "\n")
}
}
fileStats = append(fileStats, cs)
}
return fileStats
}
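A typical way to exercise this file is through Tree.Patch (defined in tree.go later in this diff). A minimal sketch, assuming two already-decoded trees:

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// showDiff prints a unified diff between two trees followed by a
// diffstat-style histogram built from the FileStats type above.
func showDiff(from, to *object.Tree) error {
	patch, err := from.Patch(to)
	if err != nil {
		return err
	}
	fmt.Println(patch)       // unified diff via Patch.Encode
	fmt.Print(patch.Stats()) // e.g. " main.go | 10 +++++++---"
	return nil
}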

350
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go generated vendored Normal file
View File

@ -0,0 +1,350 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"golang.org/x/crypto/openpgp"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// Tag represents an annotated tag object. It points to a single git object of
// any type, but tags typically are applied to commit or blob objects. It
// provides a reference that associates the target with a tag name. It also
// contains meta-information about the tag, including the tagger, tag date and
// message.
//
// Note that this is not used for lightweight tags.
//
// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
type Tag struct {
// Hash of the tag.
Hash plumbing.Hash
// Name of the tag.
Name string
// Tagger is the one who created the tag.
Tagger Signature
// Message is an arbitrary text message.
Message string
// PGPSignature is the PGP signature of the tag.
PGPSignature string
// TargetType is the object type of the target.
TargetType plumbing.ObjectType
// Target is the hash of the target object.
Target plumbing.Hash
s storer.EncodedObjectStorer
}
// GetTag gets a tag from an object storer and decodes it.
func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
o, err := s.EncodedObject(plumbing.TagObject, h)
if err != nil {
return nil, err
}
return DecodeTag(s, o)
}
// DecodeTag decodes an encoded object into a *Tag and associates it to the
// given object storer.
func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
t := &Tag{s: s}
if err := t.Decode(o); err != nil {
return nil, err
}
return t, nil
}
// ID returns the object ID of the tag, not the object that the tag references.
// The returned value will always match the current value of Tag.Hash.
//
// ID is present to fulfill the Object interface.
func (t *Tag) ID() plumbing.Hash {
return t.Hash
}
// Type returns the type of object. It always returns plumbing.TagObject.
//
// Type is present to fulfill the Object interface.
func (t *Tag) Type() plumbing.ObjectType {
return plumbing.TagObject
}
// Decode transforms a plumbing.EncodedObject into a Tag struct.
func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.TagObject {
return ErrUnsupportedObject
}
t.Hash = o.Hash()
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
for {
var line []byte
line, err = r.ReadBytes('\n')
if err != nil && err != io.EOF {
return err
}
line = bytes.TrimSpace(line)
if len(line) == 0 {
break // Start of message
}
split := bytes.SplitN(line, []byte{' '}, 2)
switch string(split[0]) {
case "object":
t.Target = plumbing.NewHash(string(split[1]))
case "type":
t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
if err != nil {
return err
}
case "tag":
t.Name = string(split[1])
case "tagger":
t.Tagger.Decode(split[1])
}
if err == io.EOF {
return nil
}
}
data, err := stdioutil.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
pgpsig = false
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
}
return nil
}
// Encode transforms a Tag into a plumbing.EncodedObject.
func (t *Tag) Encode(o plumbing.EncodedObject) error {
return t.encode(o, true)
}
func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
o.SetType(plumbing.TagObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
if _, err = fmt.Fprintf(w,
"object %s\ntype %s\ntag %s\ntagger ",
t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
return err
}
if err = t.Tagger.Encode(w); err != nil {
return err
}
if _, err = fmt.Fprint(w, "\n\n"); err != nil {
return err
}
if _, err = fmt.Fprint(w, t.Message); err != nil {
return err
}
// Note that this is highly sensitive to what is sent along in the message.
// The message *always* needs to end with a newline, or else the message and
// the signature will be concatenated into a corrupt object. Since this is a
// lower-level method, we assume you know what you are doing and have already
// prepared the message accordingly in the caller.
if includeSig {
if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
return err
}
}
return err
}
// Commit returns the commit pointed to by the tag. If the tag points to a
// different type of object ErrUnsupportedObject will be returned.
func (t *Tag) Commit() (*Commit, error) {
if t.TargetType != plumbing.CommitObject {
return nil, ErrUnsupportedObject
}
o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target)
if err != nil {
return nil, err
}
return DecodeCommit(t.s, o)
}
// Tree returns the tree pointed to by the tag. If the tag points to a commit
// object the tree of that commit will be returned. If the tag does not point
// to a commit or tree object ErrUnsupportedObject will be returned.
func (t *Tag) Tree() (*Tree, error) {
switch t.TargetType {
case plumbing.CommitObject:
c, err := t.Commit()
if err != nil {
return nil, err
}
return c.Tree()
case plumbing.TreeObject:
return GetTree(t.s, t.Target)
default:
return nil, ErrUnsupportedObject
}
}
// Blob returns the blob pointed to by the tag. If the tag points to a
// different type of object ErrUnsupportedObject will be returned.
func (t *Tag) Blob() (*Blob, error) {
if t.TargetType != plumbing.BlobObject {
return nil, ErrUnsupportedObject
}
return GetBlob(t.s, t.Target)
}
// Object returns the object pointed to by the tag.
func (t *Tag) Object() (Object, error) {
o, err := t.s.EncodedObject(t.TargetType, t.Target)
if err != nil {
return nil, err
}
return DecodeObject(t.s, o)
}
// String returns the meta information contained in the tag as a formatted
// string.
func (t *Tag) String() string {
obj, _ := t.Object()
return fmt.Sprintf(
"%s %s\nTagger: %s\nDate: %s\n\n%s\n%s",
plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat),
t.Message, objectAsString(obj),
)
}
// Verify performs PGP verification of the tag with a provided armored
// keyring and returns the openpgp.Entity associated with the verifying key on success.
func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
keyRingReader := strings.NewReader(armoredKeyRing)
keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
if err != nil {
return nil, err
}
// Extract signature.
signature := strings.NewReader(t.PGPSignature)
encoded := &plumbing.MemoryObject{}
// Encode tag components, excluding signature and get a reader object.
if err := t.encode(encoded, false); err != nil {
return nil, err
}
er, err := encoded.Reader()
if err != nil {
return nil, err
}
return openpgp.CheckArmoredDetachedSignature(keyring, er, signature)
}
// TagIter provides an iterator for a set of tags.
type TagIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewTagIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *TagIter that iterates over all
// tags contained in the storer.EncodedObjectIter.
//
// Any non-tag object returned by the storer.EncodedObjectIter is skipped.
func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter {
return &TagIter{iter, s}
}
// Next moves the iterator to the next tag and returns a pointer to it. If
// there are no more tags, it returns io.EOF.
func (iter *TagIter) Next() (*Tag, error) {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
return DecodeTag(iter.s, obj)
}
// ForEach calls the cb function for each tag contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped and no error is returned. The iterator is closed.
func (iter *TagIter) ForEach(cb func(*Tag) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
t, err := DecodeTag(iter.s, obj)
if err != nil {
return err
}
return cb(t)
})
}
func objectAsString(obj Object) string {
switch o := obj.(type) {
case *Commit:
return o.String()
default:
return ""
}
}
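A short usage sketch for the tag API, assuming a storer and a known annotated-tag hash (both supplied by the caller):

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// describeTag loads an annotated tag and, when it points to a commit,
// prints the commit it references.
func describeTag(s storer.EncodedObjectStorer, h plumbing.Hash) error {
	tag, err := object.GetTag(s, h)
	if err != nil {
		return err
	}
	fmt.Printf("%s -> %s %s\n", tag.Name, tag.TargetType, tag.Target)
	if tag.TargetType == plumbing.CommitObject {
		commit, err := tag.Commit()
		if err != nil {
			return err
		}
		fmt.Println(commit)
	}
	return nil
}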

511
vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go generated vendored Normal file
View File

@ -0,0 +1,511 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"path"
"path/filepath"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
const (
maxTreeDepth = 1024
startingStackSize = 8
)
// New errors defined by this package.
var (
ErrMaxTreeDepth = errors.New("maximum tree depth exceeded")
ErrFileNotFound = errors.New("file not found")
ErrDirectoryNotFound = errors.New("directory not found")
ErrEntryNotFound = errors.New("entry not found")
)
// Tree is basically like a directory - it references a bunch of other trees
// and/or blobs (i.e. files and sub-directories)
type Tree struct {
Entries []TreeEntry
Hash plumbing.Hash
s storer.EncodedObjectStorer
m map[string]*TreeEntry
t map[string]*Tree // tree path cache
}
// GetTree gets a tree from an object storer and decodes it.
func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) {
o, err := s.EncodedObject(plumbing.TreeObject, h)
if err != nil {
return nil, err
}
return DecodeTree(s, o)
}
// DecodeTree decodes an encoded object into a *Tree and associates it to the
// given object storer.
func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) {
t := &Tree{s: s}
if err := t.Decode(o); err != nil {
return nil, err
}
return t, nil
}
// TreeEntry represents a single entry in a tree.
type TreeEntry struct {
Name string
Mode filemode.FileMode
Hash plumbing.Hash
}
// File returns the hash of the file identified by the `path` argument.
// The path is interpreted as relative to the tree receiver.
func (t *Tree) File(path string) (*File, error) {
e, err := t.FindEntry(path)
if err != nil {
return nil, ErrFileNotFound
}
blob, err := GetBlob(t.s, e.Hash)
if err != nil {
if err == plumbing.ErrObjectNotFound {
return nil, ErrFileNotFound
}
return nil, err
}
return NewFile(path, e.Mode, blob), nil
}
// Size returns the plaintext size of an object, without reading it
// into memory.
func (t *Tree) Size(path string) (int64, error) {
e, err := t.FindEntry(path)
if err != nil {
return 0, ErrEntryNotFound
}
return t.s.EncodedObjectSize(e.Hash)
}
// Tree returns the tree identified by the `path` argument.
// The path is interpreted as relative to the tree receiver.
func (t *Tree) Tree(path string) (*Tree, error) {
e, err := t.FindEntry(path)
if err != nil {
return nil, ErrDirectoryNotFound
}
tree, err := GetTree(t.s, e.Hash)
if err == plumbing.ErrObjectNotFound {
return nil, ErrDirectoryNotFound
}
return tree, err
}
// TreeEntryFile returns the *File for a given *TreeEntry.
func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
blob, err := GetBlob(t.s, e.Hash)
if err != nil {
return nil, err
}
return NewFile(e.Name, e.Mode, blob), nil
}
// FindEntry searches for a TreeEntry in this tree or any subtree.
func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
if t.t == nil {
t.t = make(map[string]*Tree)
}
pathParts := strings.Split(path, "/")
startingTree := t
pathCurrent := ""
// search for the longest path in the tree path cache
for i := len(pathParts); i > 1; i-- {
path := filepath.Join(pathParts[:i]...)
tree, ok := t.t[path]
if ok {
startingTree = tree
pathParts = pathParts[i:]
pathCurrent = path
break
}
}
var tree *Tree
var err error
for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
if tree, err = tree.dir(pathParts[0]); err != nil {
return nil, err
}
pathCurrent = filepath.Join(pathCurrent, pathParts[0])
t.t[pathCurrent] = tree
}
return tree.entry(pathParts[0])
}
func (t *Tree) dir(baseName string) (*Tree, error) {
entry, err := t.entry(baseName)
if err != nil {
return nil, ErrDirectoryNotFound
}
obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash)
if err != nil {
return nil, err
}
tree := &Tree{s: t.s}
err = tree.Decode(obj)
return tree, err
}
func (t *Tree) entry(baseName string) (*TreeEntry, error) {
if t.m == nil {
t.buildMap()
}
entry, ok := t.m[baseName]
if !ok {
return nil, ErrEntryNotFound
}
return entry, nil
}
// Files returns a FileIter that allows iterating over the files in the Tree.
func (t *Tree) Files() *FileIter {
return NewFileIter(t.s, t)
}
// ID returns the object ID of the tree. The returned value will always match
// the current value of Tree.Hash.
//
// ID is present to fulfill the Object interface.
func (t *Tree) ID() plumbing.Hash {
return t.Hash
}
// Type returns the type of object. It always returns plumbing.TreeObject.
func (t *Tree) Type() plumbing.ObjectType {
return plumbing.TreeObject
}
// Decode transforms a plumbing.EncodedObject into a Tree struct
func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
if o.Type() != plumbing.TreeObject {
return ErrUnsupportedObject
}
t.Hash = o.Hash()
if o.Size() == 0 {
return nil
}
t.Entries = nil
t.m = nil
reader, err := o.Reader()
if err != nil {
return err
}
defer ioutil.CheckClose(reader, &err)
r := bufio.NewReader(reader)
for {
str, err := r.ReadString(' ')
if err != nil {
if err == io.EOF {
break
}
return err
}
str = str[:len(str)-1] // strip last byte (' ')
mode, err := filemode.New(str)
if err != nil {
return err
}
name, err := r.ReadString(0)
if err != nil && err != io.EOF {
return err
}
var hash plumbing.Hash
if _, err = io.ReadFull(r, hash[:]); err != nil {
return err
}
baseName := name[:len(name)-1]
t.Entries = append(t.Entries, TreeEntry{
Hash: hash,
Mode: mode,
Name: baseName,
})
}
return nil
}
// Encode transforms a Tree into a plumbing.EncodedObject.
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.TreeObject)
w, err := o.Writer()
if err != nil {
return err
}
defer ioutil.CheckClose(w, &err)
for _, entry := range t.Entries {
if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil {
return err
}
if _, err = w.Write([]byte{0x00}); err != nil {
return err
}
if _, err = w.Write([]byte(entry.Hash[:])); err != nil {
return err
}
}
return err
}
func (t *Tree) buildMap() {
t.m = make(map[string]*TreeEntry)
for i := 0; i < len(t.Entries); i++ {
t.m[t.Entries[i].Name] = &t.Entries[i]
}
}
// Diff returns a list of changes between this tree and the provided one
func (from *Tree) Diff(to *Tree) (Changes, error) {
return DiffTree(from, to)
}
// DiffContext returns a list of changes between this tree and the provided one.
// An error will be returned if the context expires.
// The provided context must be non-nil.
func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
return DiffTreeContext(ctx, from, to)
}
// Patch returns a slice of Patch objects with all the changes between trees
// in chunks. This representation can be used to create several diff outputs.
func (from *Tree) Patch(to *Tree) (*Patch, error) {
return from.PatchContext(context.Background(), to)
}
// PatchContext returns a slice of Patch objects with all the changes between
// trees in chunks. This representation can be used to create several diff
// outputs. If the context expires, an error will be returned. The provided
// context must be non-nil.
func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
changes, err := DiffTreeContext(ctx, from, to)
if err != nil {
return nil, err
}
return changes.PatchContext(ctx)
}
// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
type treeEntryIter struct {
t *Tree
pos int
}
func (iter *treeEntryIter) Next() (TreeEntry, error) {
if iter.pos >= len(iter.t.Entries) {
return TreeEntry{}, io.EOF
}
iter.pos++
return iter.t.Entries[iter.pos-1], nil
}
// TreeWalker provides a means of walking through all of the entries in a Tree.
type TreeWalker struct {
stack []*treeEntryIter
base string
recursive bool
seen map[plumbing.Hash]bool
s storer.EncodedObjectStorer
t *Tree
}
// NewTreeWalker returns a new TreeWalker for the given tree.
//
// It is the caller's responsibility to call Close() when finished with the
// tree walker.
func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker {
stack := make([]*treeEntryIter, 0, startingStackSize)
stack = append(stack, &treeEntryIter{t, 0})
return &TreeWalker{
stack: stack,
recursive: recursive,
seen: seen,
s: t.s,
t: t,
}
}
// Next returns the next object from the tree. Objects are returned in order
// and subtrees are included. After the last object has been returned further
// calls to Next() will return io.EOF.
//
// In the current implementation any objects which cannot be found in the
// underlying repository will be skipped automatically. It is possible that this
// may change in future versions.
func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) {
var obj Object
for {
current := len(w.stack) - 1
if current < 0 {
// Nothing left on the stack so we're finished
err = io.EOF
return
}
if current > maxTreeDepth {
// We're probably following bad data or some self-referencing tree
err = ErrMaxTreeDepth
return
}
entry, err = w.stack[current].Next()
if err == io.EOF {
// Finished with the current tree, move back up to the parent
w.stack = w.stack[:current]
w.base, _ = path.Split(w.base)
w.base = path.Clean(w.base) // Remove trailing slash
continue
}
if err != nil {
return
}
if w.seen[entry.Hash] {
continue
}
if entry.Mode == filemode.Dir {
obj, err = GetTree(w.s, entry.Hash)
}
name = path.Join(w.base, entry.Name)
if err != nil {
err = io.EOF
return
}
break
}
if !w.recursive {
return
}
if t, ok := obj.(*Tree); ok {
w.stack = append(w.stack, &treeEntryIter{t, 0})
w.base = path.Join(w.base, entry.Name)
}
return
}
// Tree returns the tree that the tree walker most recently operated on.
func (w *TreeWalker) Tree() *Tree {
current := len(w.stack) - 1
if w.stack[current].pos == 0 {
current--
}
if current < 0 {
return nil
}
return w.stack[current].t
}
// Close releases any resources used by the TreeWalker.
func (w *TreeWalker) Close() {
w.stack = nil
}
// TreeIter provides an iterator for a set of trees.
type TreeIter struct {
storer.EncodedObjectIter
s storer.EncodedObjectStorer
}
// NewTreeIter takes a storer.EncodedObjectStorer and a
// storer.EncodedObjectIter and returns a *TreeIter that iterates over all
// trees contained in the storer.EncodedObjectIter.
//
// Any non-tree object returned by the storer.EncodedObjectIter is skipped.
func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter {
return &TreeIter{iter, s}
}
// Next moves the iterator to the next tree and returns a pointer to it. If
// there are no more trees, it returns io.EOF.
func (iter *TreeIter) Next() (*Tree, error) {
for {
obj, err := iter.EncodedObjectIter.Next()
if err != nil {
return nil, err
}
if obj.Type() != plumbing.TreeObject {
continue
}
return DecodeTree(iter.s, obj)
}
}
// ForEach calls the cb function for each tree contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped and no error is returned. The iterator is closed.
func (iter *TreeIter) ForEach(cb func(*Tree) error) error {
return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
if obj.Type() != plumbing.TreeObject {
return nil
}
t, err := DecodeTree(iter.s, obj)
if err != nil {
return err
}
return cb(t)
})
}
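A minimal sketch of the lookup helpers above, resolving a slash-separated path inside an already-decoded tree:

package example

import (
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

// readFileFromTree returns the contents of the file at path, using
// Tree.File (and, through it, FindEntry and the tree path cache).
// It returns ErrFileNotFound if the path does not resolve to a file.
func readFileFromTree(tree *object.Tree, path string) (string, error) {
	f, err := tree.File(path)
	if err != nil {
		return "", err
	}
	return f.Contents()
}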

View File

@ -0,0 +1,136 @@
package object
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)
// A treenoder is a helper type that wraps git trees into merkletrie
// noders.
//
// As a merkletrie noder doesn't understand the concept of modes (e.g.
// file permissions), the treenoder includes the mode of the git tree in
// the hash, so changes in the modes will be detected as modifications
// to the file contents by the merkletrie difftree algorithm. This is
// consistent with how the "git diff-tree" command works.
type treeNoder struct {
parent *Tree // the root node is its own parent
name string // empty string for the root node
mode filemode.FileMode
hash plumbing.Hash
children []noder.Noder // memoized
}
// NewTreeRootNode returns the root node of a Tree
func NewTreeRootNode(t *Tree) noder.Noder {
if t == nil {
return &treeNoder{}
}
return &treeNoder{
parent: t,
name: "",
mode: filemode.Dir,
hash: t.Hash,
}
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}
func (t *treeNoder) String() string {
return "treeNoder <" + t.name + ">"
}
func (t *treeNoder) Hash() []byte {
if t.mode == filemode.Deprecated {
return append(t.hash[:], filemode.Regular.Bytes()...)
}
return append(t.hash[:], t.mode.Bytes()...)
}
func (t *treeNoder) Name() string {
return t.name
}
func (t *treeNoder) IsDir() bool {
return t.mode == filemode.Dir
}
// Children will return the children of a treenoder as treenoders,
// building them from the children of the wrapped git tree.
func (t *treeNoder) Children() ([]noder.Noder, error) {
if t.mode != filemode.Dir {
return noder.NoChildren, nil
}
// children are memoized for efficiency
if t.children != nil {
return t.children, nil
}
// the parent of the returned children will be ourselves as a tree if
// we are not the root treenoder. The root is special, as it is its
// own parent.
parent := t.parent
if !t.isRoot() {
var err error
if parent, err = t.parent.Tree(t.name); err != nil {
return nil, err
}
}
return transformChildren(parent)
}
// Returns the children of a tree as treenoders.
// Efficiency is key here.
func transformChildren(t *Tree) ([]noder.Noder, error) {
var err error
var e TreeEntry
// there will be more tree entries than children in the tree,
// due to submodules and empty directories, but it is still
// worth pre-allocating the whole array now, even if it is
// sometimes bigger than needed.
ret := make([]noder.Noder, 0, len(t.Entries))
walker := NewTreeWalker(t, false, nil) // don't recurse
// don't defer walker.Close() for efficiency reasons.
for {
_, e, err = walker.Next()
if err == io.EOF {
break
}
if err != nil {
walker.Close()
return nil, err
}
ret = append(ret, &treeNoder{
parent: t,
name: e.Name,
mode: e.Mode,
hash: e.Hash,
})
}
walker.Close()
return ret, nil
}
// The number of tree entries does not necessarily match the number of
// elements walked by the treewalker, because of empty directories,
// submodules, etc., so we have to walk here.
func (t *treeNoder) NumChildren() (int, error) {
children, err := t.Children()
if err != nil {
return 0, err
}
return len(children), nil
}
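These noders are what the difftree code at the top of this section feeds into the merkletrie diff. A sketch of the only exported entry point, wrapping two trees as merkletrie roots:

package example

import (
	"gopkg.in/src-d/go-git.v4/plumbing/object"
	"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
)

// rootNoders wraps two trees in the noder form expected by the
// merkletrie difftree algorithm; a nil tree yields an empty root.
func rootNoders(from, to *object.Tree) (noder.Noder, noder.Noder) {
	return object.NewTreeRootNode(from), object.NewTreeRootNode(to)
}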

View File

@ -0,0 +1,203 @@
package packp
import (
"fmt"
"sort"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/memory"
)
// AdvRefs values represent the information transmitted on an
// advertised-refs message. Values from this type are not zero-value
// safe, use the NewAdvRefs function instead.
type AdvRefs struct {
// Prefix stores prefix payloads.
//
// When using this message over (smart) HTTP, you have to add a pktline
// before the whole thing with the following payload:
//
// "# service=$servicename" LF
//
// Moreover, some (all) git HTTP smart servers will send a flush-pkt
// just after the first pkt-line.
//
// To accommodate both situations, the Prefix field allows you to store
// any data you want to send before the actual pktlines. It will also
// be filled up with whatever is found on the line.
Prefix [][]byte
// Head stores the resolved HEAD reference if present.
// This can be present with git-upload-pack, not with git-receive-pack.
Head *plumbing.Hash
// Capabilities holds the advertised capabilities.
Capabilities *capability.List
// References are the hash references.
References map[string]plumbing.Hash
// Peeled are the peeled hash references.
Peeled map[string]plumbing.Hash
// Shallows are the shallow object ids.
Shallows []plumbing.Hash
}
// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
func NewAdvRefs() *AdvRefs {
return &AdvRefs{
Prefix: [][]byte{},
Capabilities: capability.NewList(),
References: make(map[string]plumbing.Hash),
Peeled: make(map[string]plumbing.Hash),
Shallows: []plumbing.Hash{},
}
}
func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
switch r.Type() {
case plumbing.SymbolicReference:
v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
a.Capabilities.Add(capability.SymRef, v)
case plumbing.HashReference:
a.References[r.Name().String()] = r.Hash()
default:
return plumbing.ErrInvalidType
}
return nil
}
func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
s := memory.ReferenceStorage{}
if err := a.addRefs(s); err != nil {
return s, plumbing.NewUnexpectedError(err)
}
return s, nil
}
func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
for name, hash := range a.References {
ref := plumbing.NewReferenceFromStrings(name, hash.String())
if err := s.SetReference(ref); err != nil {
return err
}
}
if a.supportSymrefs() {
return a.addSymbolicRefs(s)
}
return a.resolveHead(s)
}
// If the server does not support symrefs capability,
// we need to guess the reference where HEAD is pointing to.
//
// Git versions prior to 1.8.4.3 have a special procedure to find
// the reference that HEAD is pointing to:
// - Check if a reference called master exists. If it exists and has
//   the same hash as HEAD, we can say that HEAD is pointing to master.
// - If master does not exist or does not have the same hash as HEAD,
//   sort the references and check, in that order, whether each one has
//   the same hash as HEAD. If so, set HEAD pointing to that branch.
// - If no such reference is found, return an error.
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
if a.Head == nil {
return nil
}
ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))
// check first if HEAD is pointing to master
if err == nil {
ok, err := a.createHeadIfCorrectReference(ref, s)
if err != nil {
return err
}
if ok {
return nil
}
}
if err != nil && err != plumbing.ErrReferenceNotFound {
return err
}
// From here we are trying to guess the branch that HEAD is pointing
refIter, err := s.IterReferences()
if err != nil {
return err
}
var refNames []string
err = refIter.ForEach(func(r *plumbing.Reference) error {
refNames = append(refNames, string(r.Name()))
return nil
})
if err != nil {
return err
}
sort.Strings(refNames)
var headSet bool
for _, refName := range refNames {
ref, err := s.Reference(plumbing.ReferenceName(refName))
if err != nil {
return err
}
ok, err := a.createHeadIfCorrectReference(ref, s)
if err != nil {
return err
}
if ok {
headSet = true
break
}
}
if !headSet {
return plumbing.ErrReferenceNotFound
}
return nil
}
func (a *AdvRefs) createHeadIfCorrectReference(
reference *plumbing.Reference,
s storer.ReferenceStorer) (bool, error) {
if reference.Hash() == *a.Head {
headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
if err := s.SetReference(headRef); err != nil {
return false, err
}
return true, nil
}
return false, nil
}
func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
for _, symref := range a.Capabilities.Get(capability.SymRef) {
chunks := strings.Split(symref, ":")
if len(chunks) != 2 {
err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
return plumbing.NewUnexpectedError(err)
}
name := plumbing.ReferenceName(chunks[0])
target := plumbing.ReferenceName(chunks[1])
ref := plumbing.NewSymbolicReference(name, target)
if err := s.SetReference(ref); err != nil {
return err
}
}
return nil
}
func (a *AdvRefs) supportSymrefs() bool {
return a.Capabilities.Supports(capability.SymRef)
}
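A sketch of building an AdvRefs by hand and materializing its references; the hash is a made-up example value, and AllReferences is assumed to return the map-backed memory.ReferenceStorage used above:

package example

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func buildAdvRefs() error {
	ar := packp.NewAdvRefs()

	// Advertise one branch and make HEAD resolve to the same commit.
	hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	ref := plumbing.NewReferenceFromStrings("refs/heads/master", hash.String())
	if err := ar.AddReference(ref); err != nil {
		return err
	}
	ar.Head = &hash

	// AllReferences runs addRefs, which also synthesizes the HEAD
	// symbolic reference via resolveHead.
	refs, err := ar.AllReferences()
	if err != nil {
		return err
	}
	for name := range refs {
		fmt.Println(name) // prints HEAD and refs/heads/master (map order)
	}
	return nil
}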

View File

@ -0,0 +1,288 @@
package packp
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Decode reads the next advertised-refs message from its input and
// stores it in the AdvRefs.
func (a *AdvRefs) Decode(r io.Reader) error {
d := newAdvRefsDecoder(r)
return d.Decode(a)
}
type advRefsDecoder struct {
s *pktline.Scanner // a pkt-line scanner from the input stream
line []byte // current pkt-line contents, use parser.nextLine() to make it advance
nLine int // current pkt-line number for debugging, begins at 1
hash plumbing.Hash // last hash read
err error // sticky error, use the parser.error() method to fill this out
data *AdvRefs // parsed data is stored here
}
var (
// ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
// references message.
ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
// ErrEmptyInput is returned by Decode if the input is empty.
ErrEmptyInput = errors.New("empty input")
)
func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
return &advRefsDecoder{
s: pktline.NewScanner(r),
}
}
func (d *advRefsDecoder) Decode(v *AdvRefs) error {
d.data = v
for state := decodePrefix; state != nil; {
state = state(d)
}
return d.err
}
type decoderStateFn func(*advRefsDecoder) decoderStateFn
// fills out the parser's sticky error
func (d *advRefsDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,
fmt.Sprintf(format, a...),
)
d.err = NewErrUnexpectedData(msg, d.line)
}
// Reads a new pkt-line from the scanner, makes its payload available as
// p.line and increments p.nLine. A successful invocation returns true,
// otherwise, false is returned and the sticky error is filled out
// accordingly. Trims eols at the end of the payloads.
func (d *advRefsDecoder) nextLine() bool {
d.nLine++
if !d.s.Scan() {
if d.err = d.s.Err(); d.err != nil {
return false
}
if d.nLine == 1 {
d.err = ErrEmptyInput
return false
}
d.error("EOF")
return false
}
d.line = d.s.Bytes()
d.line = bytes.TrimSuffix(d.line, eol)
return true
}
// The HTTP smart prefix is often followed by a flush-pkt.
func decodePrefix(d *advRefsDecoder) decoderStateFn {
if ok := d.nextLine(); !ok {
return nil
}
if !isPrefix(d.line) {
return decodeFirstHash
}
tmp := make([]byte, len(d.line))
copy(tmp, d.line)
d.data.Prefix = append(d.data.Prefix, tmp)
if ok := d.nextLine(); !ok {
return nil
}
if !isFlush(d.line) {
return decodeFirstHash
}
d.data.Prefix = append(d.data.Prefix, pktline.Flush)
if ok := d.nextLine(); !ok {
return nil
}
return decodeFirstHash
}
func isPrefix(payload []byte) bool {
return len(payload) > 0 && payload[0] == '#'
}
// If the first hash is zero, then a no-refs message is coming. Otherwise, a
// list-of-refs is coming, and the hash will be followed by the first
// advertised ref.
func decodeFirstHash(p *advRefsDecoder) decoderStateFn {
// If the repository is empty, we receive a flush here (HTTP).
if isFlush(p.line) {
p.err = ErrEmptyAdvRefs
return nil
}
if len(p.line) < hashSize {
p.error("cannot read hash, pkt-line too short")
return nil
}
if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
p.error("invalid hash text: %s", err)
return nil
}
p.line = p.line[hashSize:]
if p.hash.IsZero() {
return decodeSkipNoRefs
}
return decodeFirstRef
}
// Skips SP "capabilities^{}" NUL
func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
if len(p.line) < len(noHeadMark) {
p.error("too short zero-id ref")
return nil
}
if !bytes.HasPrefix(p.line, noHeadMark) {
p.error("malformed zero-id ref")
return nil
}
p.line = p.line[len(noHeadMark):]
return decodeCaps
}
// decode the refname, expects SP refname NULL
func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
if len(l.line) < 3 {
l.error("line too short after hash")
return nil
}
if !bytes.HasPrefix(l.line, sp) {
l.error("no space after hash")
return nil
}
l.line = l.line[1:]
chunks := bytes.SplitN(l.line, null, 2)
if len(chunks) < 2 {
l.error("NULL not found")
return nil
}
ref := chunks[0]
l.line = chunks[1]
if bytes.Equal(ref, []byte(head)) {
l.data.Head = &l.hash
} else {
l.data.References[string(ref)] = l.hash
}
return decodeCaps
}
func decodeCaps(p *advRefsDecoder) decoderStateFn {
if err := p.data.Capabilities.Decode(p.line); err != nil {
p.error("invalid capabilities: %s", err)
return nil
}
return decodeOtherRefs
}
// The refs are either tips (obj-id SP refname) or peeled (obj-id SP refname^{}).
// If there are no refs, then there might be a shallow or flush-pkt.
func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
if ok := p.nextLine(); !ok {
return nil
}
if bytes.HasPrefix(p.line, shallow) {
return decodeShallow
}
if len(p.line) == 0 {
return nil
}
saveTo := p.data.References
if bytes.HasSuffix(p.line, peeled) {
p.line = bytes.TrimSuffix(p.line, peeled)
saveTo = p.data.Peeled
}
ref, hash, err := readRef(p.line)
if err != nil {
p.error("%s", err)
return nil
}
saveTo[ref] = hash
return decodeOtherRefs
}
// Reads a ref-name
func readRef(data []byte) (string, plumbing.Hash, error) {
chunks := bytes.Split(data, sp)
switch {
case len(chunks) == 1:
return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
case len(chunks) > 2:
return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
default:
return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
}
}
// Keeps reading shallows until a flush-pkt is found
func decodeShallow(p *advRefsDecoder) decoderStateFn {
if !bytes.HasPrefix(p.line, shallow) {
p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
return nil
}
p.line = bytes.TrimPrefix(p.line, shallow)
if len(p.line) != hashSize {
p.error(fmt.Sprintf(
"malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
len(p.line)))
return nil
}
text := p.line[:hashSize]
var h plumbing.Hash
if _, err := hex.Decode(h[:], text); err != nil {
p.error("invalid hash text: %s", err)
return nil
}
p.data.Shallows = append(p.data.Shallows, h)
if ok := p.nextLine(); !ok {
return nil
}
if len(p.line) == 0 {
return nil // successful parse of the advertised-refs message
}
return decodeShallow
}
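A decoding sketch with a hand-framed pkt-line stream; the object id is a made-up value, and the helper computes the 4-hex-digit length header that the pkt-line format requires:

package example

import (
	"fmt"
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

// pktline frames a payload as a pkt-line: four hex digits holding the
// total length (header included) followed by the payload itself.
func pktline(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func decodeAdvRefs() error {
	hash := "6ecf0ef2c2dffb796033e5a02219af86ec6584e5"
	raw := pktline(hash+" HEAD\x00symref=HEAD:refs/heads/master\n") +
		pktline(hash+" refs/heads/master\n") +
		"0000" // flush-pkt terminates the advertised-refs message

	ar := packp.NewAdvRefs()
	if err := ar.Decode(strings.NewReader(raw)); err != nil {
		return err
	}
	fmt.Println(ar.Head != nil, len(ar.References)) // true 1
	return nil
}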

View File

@ -0,0 +1,176 @@
package packp
import (
"bytes"
"fmt"
"io"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// Encode writes the AdvRefs encoding to a writer.
//
// All the payloads will end with a newline character. Capabilities,
// references and shallows are written in alphabetical order, except for
// peeled references that always follow their corresponding references.
func (a *AdvRefs) Encode(w io.Writer) error {
e := newAdvRefsEncoder(w)
return e.Encode(a)
}
type advRefsEncoder struct {
data *AdvRefs // data to encode
pe *pktline.Encoder // where to write the encoded data
firstRefName string // reference name to encode in the first pkt-line (HEAD if present)
firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present)
sortedRefs []string // hash references to encode, sorted in increasing order
err error // sticky error
}
func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
return &advRefsEncoder{
pe: pktline.NewEncoder(w),
}
}
func (e *advRefsEncoder) Encode(v *AdvRefs) error {
e.data = v
e.sortRefs()
e.setFirstRef()
for state := encodePrefix; state != nil; {
state = state(e)
}
return e.err
}
func (e *advRefsEncoder) sortRefs() {
if len(e.data.References) > 0 {
refs := make([]string, 0, len(e.data.References))
for refName := range e.data.References {
refs = append(refs, refName)
}
sort.Strings(refs)
e.sortedRefs = refs
}
}
func (e *advRefsEncoder) setFirstRef() {
if e.data.Head != nil {
e.firstRefName = head
e.firstRefHash = *e.data.Head
return
}
if len(e.sortedRefs) > 0 {
refName := e.sortedRefs[0]
e.firstRefName = refName
e.firstRefHash = e.data.References[refName]
}
}
type encoderStateFn func(*advRefsEncoder) encoderStateFn
func encodePrefix(e *advRefsEncoder) encoderStateFn {
for _, p := range e.data.Prefix {
if bytes.Equal(p, pktline.Flush) {
if e.err = e.pe.Flush(); e.err != nil {
return nil
}
continue
}
if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
return nil
}
}
return encodeFirstLine
}
// Adds the first pkt-line payload: head hash, head ref and capabilities.
// If the HEAD ref is not found, the first reference in increasing order will be used.
// If there is neither a HEAD nor any refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
func encodeFirstLine(e *advRefsEncoder) encoderStateFn {
const formatFirstLine = "%s %s\x00%s\n"
var firstLine string
capabilities := formatCaps(e.data.Capabilities)
if e.firstRefName == "" {
firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities)
} else {
firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities)
}
if e.err = e.pe.EncodeString(firstLine); e.err != nil {
return nil
}
return encodeRefs
}
func formatCaps(c *capability.List) string {
if c == nil {
return ""
}
return c.String()
}
// Adds the (sorted) refs: hash SP refname EOL
// and their peeled refs if any.
func encodeRefs(e *advRefsEncoder) encoderStateFn {
for _, r := range e.sortedRefs {
if r == e.firstRefName {
continue
}
hash := e.data.References[r]
if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil {
return nil
}
if hash, ok := e.data.Peeled[r]; ok {
if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil {
return nil
}
}
}
return encodeShallow
}
// Adds the (sorted) shallows: "shallow" SP hash EOL
func encodeShallow(e *advRefsEncoder) encoderStateFn {
sorted := sortShallows(e.data.Shallows)
for _, hash := range sorted {
if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
return nil
}
}
return encodeFlush
}
func sortShallows(c []plumbing.Hash) []string {
ret := []string{}
for _, h := range c {
ret = append(ret, h.String())
}
sort.Strings(ret)
return ret
}
func encodeFlush(e *advRefsEncoder) encoderStateFn {
e.err = e.pe.Flush()
return nil
}
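And the round trip back: encoding a populated AdvRefs writes the first pkt-line (HEAD plus capabilities), the sorted refs, and a final flush-pkt. A sketch using the same made-up hash:

package example

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func encodeAdvRefs() error {
	ar := packp.NewAdvRefs()
	hash := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
	ar.References["refs/heads/master"] = hash
	ar.Head = &hash

	var buf bytes.Buffer
	if err := ar.Encode(&buf); err != nil {
		return err
	}
	// First the HEAD line with the (empty) capability list after a NUL,
	// then refs/heads/master, then the "0000" flush-pkt.
	fmt.Printf("%q\n", buf.String())
	return nil
}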

View File

@ -0,0 +1,252 @@
// Package capability defines the server and client capabilities.
package capability
// Capability describes a server or client capability.
type Capability string
func (n Capability) String() string {
return string(n)
}
const (
// MultiACK capability allows the server to return "ACK obj-id continue" as
// soon as it finds a commit that it can use as a common base, between the
// client's wants and the client's have set.
//
// By sending this early, the server can potentially head off the client
// from walking any further down that particular branch of the client's
// repository history. The client may still need to walk down other
// branches, sending have lines for those, until the server has a
// complete cut across the DAG, or the client has said "done".
//
// Without multi_ack, a client sends have lines in --date-order until
// the server has found a common base. That means the client will send
// have lines that are already known by the server to be common, because
// they overlap in time with another branch that the server hasn't found
// a common base on yet.
//
// For example suppose the client has commits in caps that the server
// doesn't and the server has commits in lower case that the client
// doesn't, as in the following diagram:
//
// +---- u ---------------------- x
// / +----- y
// / /
// a -- b -- c -- d -- E -- F
// \
// +--- Q -- R -- S
//
// If the client wants x,y and starts out by saying have F,S, the server
// doesn't know what F,S is. Eventually the client says "have d" and
// the server sends "ACK d continue" to let the client know to stop
// walking down that line (so don't send c-b-a), but it's not done yet,
// it needs a base for x. The client keeps going with S-R-Q, until a
// gets reached, at which point the server has a clear base and it all
// ends.
//
// Without multi_ack the client would have sent that c-b-a chain anyway,
// interleaved with S-R-Q.
MultiACK Capability = "multi_ack"
// MultiACKDetailed is an extension of multi_ack that permits client to
// better understand the server's in-memory state.
MultiACKDetailed Capability = "multi_ack_detailed"
// NoDone should only be used with the smart HTTP protocol. If
// multi_ack_detailed and no-done are both present, then the sender is
// free to immediately send a pack following its first "ACK obj-id ready"
// message.
//
// Without no-done in the smart HTTP protocol, the server session would
// end and the client has to make another trip to send "done" before
// the server can send the pack. no-done removes the last round and
// thus slightly reduces latency.
NoDone Capability = "no-done"
// ThinPack is one with deltas which reference base objects not
// contained within the pack (but are known to exist at the receiving
// end). This can reduce the network traffic significantly, but it
// requires the receiving end to know how to "thicken" these packs by
// adding the missing bases to the pack.
//
// The upload-pack server advertises 'thin-pack' when it can generate
// and send a thin pack. A client requests the 'thin-pack' capability
// when it understands how to "thicken" it, notifying the server that
// it can receive such a pack. A client MUST NOT request the
// 'thin-pack' capability if it cannot turn a thin pack into a
// self-contained pack.
//
// Receive-pack, on the other hand, is assumed by default to be able to
// handle thin packs, but can ask the client not to use the feature by
// advertising the 'no-thin' capability. A client MUST NOT send a thin
// pack if the server advertises the 'no-thin' capability.
//
// The reasons for this asymmetry are historical. The receive-pack
// program did not exist until after the invention of thin packs, so
// historically the reference implementation of receive-pack always
// understood thin packs. Adding 'no-thin' later allowed receive-pack
// to disable the feature in a backwards-compatible manner.
ThinPack Capability = "thin-pack"
// Sideband means that the server can send, and the client understands,
// multiplexed progress reports and error info interleaved with the packfile
// itself.
//
// These two options are mutually exclusive. A modern client always
// favors Sideband64k.
//
// Either mode indicates that the packfile data will be streamed broken
// up into packets of up to either 1000 bytes in the case of 'side_band',
// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
// of a leading 4-byte pkt-line length of how much data is in the packet,
// followed by a 1-byte stream code, followed by the actual data.
//
// The stream code can be one of:
//
// 1 - pack data
// 2 - progress messages
// 3 - fatal error message just before stream aborts
//
// The "side-band-64k" capability came about as a way for newer clients
// that can handle much larger packets to request packets that are
// actually crammed nearly full, while maintaining backward compatibility
// for the older clients.
//
// Further, with side-band and its up to 1000-byte messages, it's actually
// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
// same deal, you have up to 65519 bytes of data and 1 byte for the stream
// code.
//
// The client MUST send only maximum of one of "side-band" and "side-
// band-64k". Server MUST diagnose it as an error if client requests
// both.
Sideband Capability = "side-band"
Sideband64k Capability = "side-band-64k"
// OFSDelta means the server can send, and the client understands, PACKv2
// with deltas referring to their base by position in the pack rather than
// by an obj-id. That is, they can send/read OBJ_OFS_DELTA (aka type 6) in a
// packfile.
OFSDelta Capability = "ofs-delta"
// Agent the server may optionally send this capability to notify the client
// that the server is running version `X`. The client may optionally return
// its own agent string by responding with an `agent=Y` capability (but it
// MUST NOT do so if the server did not mention the agent capability). The
// `X` and `Y` strings may contain any printable ASCII characters except
// space (i.e., the byte range 32 < x < 127), and are typically of the form
// "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely
// informative for statistics and debugging purposes, and MUST NOT be used
// to programmatically assume the presence or absence of particular features.
Agent Capability = "agent"
// Shallow capability adds "deepen", "shallow" and "unshallow" commands to
// the fetch-pack/upload-pack protocol so clients can request shallow
// clones.
Shallow Capability = "shallow"
// DeepenSince adds "deepen-since" command to fetch-pack/upload-pack
// protocol so the client can request shallow clones that are cut at a
// specific time, instead of depth. Internally it's equivalent to doing
// "rev-list --max-age=<timestamp>" on the server side. "deepen-since"
// cannot be used with "deepen".
DeepenSince Capability = "deepen-since"
// DeepenNot adds "deepen-not" command to fetch-pack/upload-pack
// protocol so the client can request shallow clones that are cut at a
// specific revision, instead of depth. Internally it's equivalent to
// doing "rev-list --not <rev>" on the server side. "deepen-not"
// cannot be used with "deepen", but can be used with "deepen-since".
DeepenNot Capability = "deepen-not"
// DeepenRelative if this capability is requested by the client, the
// semantics of the "deepen" command change. The "depth" argument is the
// depth from the current shallow boundary, instead of the depth from
// remote refs.
DeepenRelative Capability = "deepen-relative"
// NoProgress the client was started with "git clone -q" or similar, and
// doesn't want sideband channel 2. Basically the client just says "I do not
// wish to receive stream 2 on sideband, so do not send it to me, and if
// you did, I will drop it on the floor anyway". However, the sideband
// channel 3 is still used for error responses.
NoProgress Capability = "no-progress"
// IncludeTag capability is about sending annotated tags if we are
// sending objects they point to. If we pack an object to the client, and
// a tag object points exactly at that object, we pack the tag object too.
// In general this allows a client to get all new annotated tags when it
// fetches a branch, in a single network connection.
//
// Clients MAY always send include-tag, hardcoding it into a request when
// the server advertises this capability. The decision for a client to
// request include-tag only has to do with the client's desires for tag
// data, whether or not a server had advertised objects in the
// refs/tags/* namespace.
//
// Servers MUST pack the tags if their referent is packed and the client
// has requested include-tag.
//
// Clients MUST be prepared for the case where a server has ignored
// include-tag and has not actually sent tags in the pack. In such
// cases the client SHOULD issue a subsequent fetch to acquire the tags
// that include-tag would have otherwise given the client.
//
// The server SHOULD send include-tag, if it supports it, regardless
// of whether or not there are tags available.
IncludeTag Capability = "include-tag"
// ReportStatus the receive-pack process can receive a 'report-status'
// capability, which tells it that the client wants a report of what
// happened after a packfile upload and reference update. If the pushing
// client requests this capability, after unpacking and updating references
// the server will respond with whether the packfile unpacked successfully
// and if each reference was updated successfully. If any of those were not
// successful, it will send back an error message. See pack-protocol.txt
// for example messages.
ReportStatus Capability = "report-status"
// DeleteRefs If the server sends back this capability, it means that
// it is capable of accepting a zero-id value as the target
// value of a reference update. It is not sent back by the client; it
// simply informs the client that zero-id values can be sent
// to delete references.
DeleteRefs Capability = "delete-refs"
// Quiet If the receive-pack server advertises this capability, it is
// capable of silencing human-readable progress output which otherwise may
// be shown when processing the received pack. A send-pack client should
// respond with the 'quiet' capability to suppress server-side progress
// reporting if the local progress reporting is also being suppressed
// (e.g., via `push -q`, or if stderr does not go to a tty).
Quiet Capability = "quiet"
// Atomic If the server sends this capability it is capable of accepting
// atomic pushes. If the pushing client requests this capability, the server
// will update the refs in one atomic transaction. Either all refs are
// updated or none.
Atomic Capability = "atomic"
// PushOptions If the server sends this capability it is able to accept
// push options after the update commands have been sent, but before the
// packfile is streamed. If the pushing client requests this capability,
// the server will pass the options to the pre- and post- receive hooks
// that process this push request.
PushOptions Capability = "push-options"
// AllowTipSHA1InWant if the upload-pack server advertises this capability,
// fetch-pack may send "want" lines with SHA-1s that exist at the server but
// are not advertised by upload-pack.
AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
// AllowReachableSHA1InWant if the upload-pack server advertises this
// capability, fetch-pack may send "want" lines with SHA-1s that exist at
// the server but are not advertised by upload-pack.
AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
// PushCert the receive-pack server that advertises this capability is
// willing to accept a signed push certificate, and asks the <nonce> to be
// included in the push certificate. A send-pack client MUST NOT
// send a push-cert packet unless the receive-pack server advertises
// this capability.
PushCert Capability = "push-cert"
// SymRef symbolic reference support for better negotiation.
SymRef Capability = "symref"
)
// DefaultAgent is the default go-git agent string.
const DefaultAgent = "go-git/4.x"
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
}
var requiresArgument = map[Capability]bool{
Agent: true, PushCert: true, SymRef: true,
}
var multipleArgument = map[Capability]bool{
SymRef: true,
}

View File

@ -0,0 +1,196 @@
package capability
import (
"bytes"
"errors"
"fmt"
"strings"
)
var (
// ErrArgumentsRequired is returned if no arguments are given with a
// capability that requires arguments
ErrArgumentsRequired = errors.New("arguments required")
// ErrArguments is returned if arguments are given with a capability that
// does not support arguments
ErrArguments = errors.New("arguments not allowed")
// ErrEmtpyArgument is returned when an empty value is given
ErrEmtpyArgument = errors.New("empty argument")
// ErrMultipleArguments is returned when multiple arguments are given to a
// capability that does not support them
ErrMultipleArguments = errors.New("multiple arguments not allowed")
)
// List represents a list of capabilities
type List struct {
m map[Capability]*entry
sort []string
}
type entry struct {
Name Capability
Values []string
}
// NewList returns a new List of capabilities
func NewList() *List {
return &List{
m: make(map[Capability]*entry),
}
}
// IsEmpty returns true if the List is empty
func (l *List) IsEmpty() bool {
return len(l.sort) == 0
}
// Decode decodes a list of capabilities from raw into the list
func (l *List) Decode(raw []byte) error {
// git 1.x receive pack used to send a leading space on its
// git-receive-pack capabilities announcement. We just trim space to be
// tolerant to space changes in different versions.
raw = bytes.TrimSpace(raw)
if len(raw) == 0 {
return nil
}
for _, data := range bytes.Split(raw, []byte{' '}) {
pair := bytes.SplitN(data, []byte{'='}, 2)
c := Capability(pair[0])
if len(pair) == 1 {
if err := l.Add(c); err != nil {
return err
}
continue
}
if err := l.Add(c, string(pair[1])); err != nil {
return err
}
}
return nil
}
// Get returns the values for a capability
func (l *List) Get(capability Capability) []string {
if _, ok := l.m[capability]; !ok {
return nil
}
return l.m[capability].Values
}
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
if _, ok := l.m[capability]; ok {
delete(l.m, capability)
}
return l.Add(capability, values...)
}
// Add adds a capability, values are optional
func (l *List) Add(c Capability, values ...string) error {
if err := l.validate(c, values); err != nil {
return err
}
if !l.Supports(c) {
l.m[c] = &entry{Name: c}
l.sort = append(l.sort, c.String())
}
if len(values) == 0 {
return nil
}
if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 {
return ErrMultipleArguments
}
l.m[c].Values = append(l.m[c].Values, values...)
return nil
}
func (l *List) validateNoEmptyArgs(values []string) error {
for _, v := range values {
if v == "" {
return ErrEmtpyArgument
}
}
return nil
}
func (l *List) validate(c Capability, values []string) error {
if !known[c] {
return l.validateNoEmptyArgs(values)
}
if requiresArgument[c] && len(values) == 0 {
return ErrArgumentsRequired
}
if !requiresArgument[c] && len(values) != 0 {
return ErrArguments
}
if !multipleArgument[c] && len(values) > 1 {
return ErrMultipleArguments
}
return l.validateNoEmptyArgs(values)
}
// Supports returns true if capability is present
func (l *List) Supports(capability Capability) bool {
_, ok := l.m[capability]
return ok
}
// Delete deletes a capability from the List
func (l *List) Delete(capability Capability) {
if !l.Supports(capability) {
return
}
delete(l.m, capability)
for i, c := range l.sort {
if c != string(capability) {
continue
}
l.sort = append(l.sort[:i], l.sort[i+1:]...)
return
}
}
// All returns a slice with all defined capabilities.
func (l *List) All() []Capability {
var cs []Capability
for _, key := range l.sort {
cs = append(cs, Capability(key))
}
return cs
}
// String generates the capabilities string; the capabilities are sorted in
// insertion order
func (l *List) String() string {
var o []string
for _, key := range l.sort {
cap := l.m[Capability(key)]
if len(cap.Values) == 0 {
o = append(o, key)
continue
}
for _, value := range cap.Values {
o = append(o, fmt.Sprintf("%s=%s", key, value))
}
}
return strings.Join(o, " ")
}
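
For illustration only (this usage sketch is not part of the commit; the raw capability string and the agent value are made up):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)

func main() {
	l := capability.NewList()
	// A raw announcement as it would appear after the first advertised ref.
	raw := []byte("multi_ack thin-pack side-band-64k agent=git/2.20.1")
	if err := l.Decode(raw); err != nil {
		panic(err)
	}
	fmt.Println(l.Supports(capability.ThinPack)) // true
	fmt.Println(l.Get(capability.Agent))         // [git/2.20.1]
	fmt.Println(l.String())                      // round-trips in insertion order
}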

View File

@ -0,0 +1,70 @@
package packp
import (
"fmt"
)
type stateFn func() stateFn
const (
// common
hashSize = 40
// advrefs
head = "HEAD"
noHead = "capabilities^{}"
)
var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")
peeled = []byte("^{}")
noHeadMark = []byte(" capabilities^{}\x00")
// upload-request
want = []byte("want ")
shallow = []byte("shallow ")
deepen = []byte("deepen")
deepenCommits = []byte("deepen ")
deepenSince = []byte("deepen-since ")
deepenReference = []byte("deepen-not ")
// shallow-update
unshallow = []byte("unshallow ")
// server-response
ack = []byte("ACK")
nak = []byte("NAK")
// updreq
shallowNoSp = []byte("shallow")
)
func isFlush(payload []byte) bool {
return len(payload) == 0
}
// ErrUnexpectedData represents unexpected data found while decoding a message
type ErrUnexpectedData struct {
Msg string
Data []byte
}
// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and
// the message given
func NewErrUnexpectedData(msg string, data []byte) error {
return &ErrUnexpectedData{Msg: msg, Data: data}
}
func (err *ErrUnexpectedData) Error() string {
if len(err.Data) == 0 {
return err.Msg
}
return fmt.Sprintf("%s (%s)", err.Msg, err.Data)
}
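
A quick illustration (not part of the commit; the message and bytes are invented) of the two Error() forms:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	// With data, the offending bytes are appended to the message.
	err := packp.NewErrUnexpectedData("malformed want line", []byte("wan 1234"))
	fmt.Println(err) // malformed want line (wan 1234)
	// Without data, only the message is printed.
	fmt.Println(packp.NewErrUnexpectedData("malformed want line", nil))
}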

View File

@ -0,0 +1,724 @@
package packp
/*
A nice way to trace the real data transmitted and received by git is to use:
GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
Here follows a copy of the current protocol specification at the time of
this writing.
(Please notice that most http git servers will add a flush-pkt after the
first pkt-line when using HTTP smart.)
Documentation Common to Pack and Http Protocols
===============================================
ABNF Notation
-------------
ABNF notation as described by RFC 5234 is used within the protocol documents,
except the following replacement core rules are used:
----
HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
----
We also define the following common rules:
----
NUL = %x00
zero-id = 40*"0"
obj-id = 40*(HEXDIG)
refname = "HEAD"
refname /= "refs/" <see discussion below>
----
A refname is a hierarchical octet string beginning with "refs/" and
not violating the 'git-check-ref-format' command's validation rules.
More specifically, they:
. They can include slash `/` for hierarchical (directory)
grouping, but no slash-separated component can begin with a
dot `.`.
. They must contain at least one `/`. This enforces the presence of a
category like `heads/`, `tags/` etc. but the actual names are not
restricted.
. They cannot have two consecutive dots `..` anywhere.
. They cannot have ASCII control characters (i.e. bytes whose
values are lower than \040, or \177 `DEL`), space, tilde `~`,
caret `^`, colon `:`, question-mark `?`, asterisk `*`,
or open bracket `[` anywhere.
. They cannot end with a slash `/` or a dot `.`.
. They cannot end with the sequence `.lock`.
. They cannot contain a sequence `@{`.
. They cannot contain a `\\`.
pkt-line Format
---------------
Much (but not all) of the payload is described around pkt-lines.
A pkt-line is a variable length binary string. The first four bytes
of the line, the pkt-len, indicates the total length of the line,
in hexadecimal. The pkt-len includes the 4 bytes used to contain
the length's hexadecimal representation.
A pkt-line MAY contain binary data, so implementors MUST ensure
pkt-line parsing/formatting routines are 8-bit clean.
A non-binary line SHOULD BE terminated by an LF, which if present
MUST be included in the total length. Receivers MUST treat pkt-lines
with non-binary data the same whether or not they contain the trailing
LF (stripping the LF if present, and not complaining when it is
missing).
The maximum length of a pkt-line's data component is 65516 bytes.
Implementations MUST NOT send pkt-line whose length exceeds 65520
(65516 bytes of payload + 4 bytes of length data).
Implementations SHOULD NOT send an empty pkt-line ("0004").
A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
is a special case and MUST be handled differently than an empty
pkt-line ("0004").
----
pkt-line = data-pkt / flush-pkt
data-pkt = pkt-len pkt-payload
pkt-len = 4*(HEXDIG)
pkt-payload = (pkt-len - 4)*(OCTET)
flush-pkt = "0000"
----
Examples (as C-style strings):
----
pkt-line actual value
---------------------------------
"0006a\n" "a\n"
"0005a" "a"
"000bfoobar\n" "foobar\n"
"0004" ""
----
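
For illustration only (a sketch added for this document, not part of the specification; the helper name pktLine is made up), the examples above can be reproduced with a few lines of Go:

package main

import "fmt"

// pktLine encodes payload as a pkt-line: four hex digits holding the total
// length (the payload plus the 4 length bytes themselves), then the payload.
func pktLine(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func main() {
	fmt.Printf("%q\n", pktLine("a\n"))      // "0006a\n"
	fmt.Printf("%q\n", pktLine("foobar\n")) // "000bfoobar\n"
	// A flush-pkt is the literal "0000", not an encoding of empty data; an
	// empty payload would encode to "0004", which SHOULD NOT be sent.
}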
Packfile transfer protocols
===========================
Git supports transferring data in packfiles over the ssh://, git://, http:// and
file:// transports. There exist two sets of protocols, one for pushing
data from a client to a server and another for fetching data from a
server to a client. The three transports (ssh, git, file) use the same
protocol to transfer data. http is documented in http-protocol.txt.
The processes invoked in the canonical Git implementation are 'upload-pack'
on the server side and 'fetch-pack' on the client side for fetching data;
then 'receive-pack' on the server and 'send-pack' on the client for pushing
data. The protocol functions to have a server tell a client what is
currently on the server, then for the two to negotiate the smallest amount
of data to send in order to fully update one or the other.
pkt-line Format
---------------
The descriptions below build on the pkt-line format described in
protocol-common.txt. When the grammar indicates `PKT-LINE(...)`, unless
otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
include a LF, but the receiver MUST NOT complain if it is not present.
Transports
----------
There are three transports over which the packfile protocol is
initiated. The Git transport is a simple, unauthenticated server that
takes the command (almost always 'upload-pack', though Git
servers can be configured to be globally writable, in which case 'receive-
pack' initiation is also allowed) with which the client wishes to
communicate and executes it and connects it to the requesting
process.
In the SSH transport, the client just runs the 'upload-pack'
or 'receive-pack' process on the server over the SSH protocol and then
communicates with that invoked process over the SSH connection.
The file:// transport runs the 'upload-pack' or 'receive-pack'
process locally and communicates with it over a pipe.
Git Transport
-------------
The Git transport starts off by sending the command and repository
on the wire using the pkt-line format, followed by a NUL byte and a
hostname parameter, terminated by a NUL byte.
0032git-upload-pack /project.git\0host=myserver.com\0
--
git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
request-command = "git-upload-pack" / "git-receive-pack" /
"git-upload-archive" ; case sensitive
pathname = *( %x01-ff ) ; exclude NUL
host-parameter = "host=" hostname [ ":" port ]
--
Only host-parameter is allowed in the git-proto-request. Clients
MUST NOT attempt to send additional parameters. It is used for the
git-daemon name based virtual hosting. See --interpolated-path
option to git daemon, with the %H/%CH format characters.
Basically what the Git client is doing to connect to an 'upload-pack'
process on the server side over the Git protocol is this:
$ echo -e -n \
"0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
nc -v example.com 9418
If the server refuses the request for some reason, it could abort
gracefully with an error message.
----
error-line = PKT-LINE("ERR" SP explanation-text)
----
SSH Transport
-------------
Initiating the upload-pack or receive-pack processes over SSH is
executing the binary on the server via SSH remote execution.
It is basically equivalent to running this:
$ ssh git.example.com "git-upload-pack '/project.git'"
For a server to support Git pushing and pulling for a given user over
SSH, that user needs to be able to execute one or both of those
commands via the SSH shell that they are provided on login. On some
systems, that shell access is limited to only being able to run those
two commands, or even just one of them.
In an ssh:// format URI, it's absolute in the URI, so the '/' after
the host name (or port number) is sent as an argument, which is then
read by the remote git-upload-pack exactly as is, so it's effectively
an absolute path in the remote filesystem.
git clone ssh://user@example.com/project.git
|
v
ssh user@example.com "git-upload-pack '/project.git'"
In a "user@host:path" format URI, its relative to the user's home
directory, because the Git client will run:
git clone user@example.com:project.git
|
v
ssh user@example.com "git-upload-pack 'project.git'"
The exception is if a '~' is used, in which case
we execute it without the leading '/'.
ssh://user@example.com/~alice/project.git,
|
v
ssh user@example.com "git-upload-pack '~alice/project.git'"
A few things to remember here:
- The "command name" is spelled with dash (e.g. git-upload-pack), but
this can be overridden by the client;
- The repository path is always quoted with single quotes.
Fetching Data From a Server
---------------------------
When one Git repository wants to get data that a second repository
has, the first can 'fetch' from the second. This operation determines
what data the server has that the client does not, then streams that
data down to the client in packfile format.
Reference Discovery
-------------------
When the client initially connects the server will immediately respond
with a listing of each reference it has (all branches and tags) along
with the object name that each reference currently points to.
$ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
nc -v example.com 9418
00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
side-band side-band-64k ofs-delta shallow no-progress include-tag
00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
0000
The returned response is a pkt-line stream describing each ref and
its current value. The stream MUST be sorted by name according to
the C locale ordering.
If HEAD is a valid ref, HEAD MUST appear as the first advertised
ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
advertisement list at all, but other refs may still appear.
The stream MUST include capability declarations behind a NUL on the
first ref. The peeled value of a ref (that is "ref^{}") MUST be
immediately after the ref itself, if presented. A conforming server
MUST peel the ref if it's an annotated tag.
----
advertised-refs = (no-refs / list-of-refs)
*shallow
flush-pkt
no-refs = PKT-LINE(zero-id SP "capabilities^{}"
NUL capability-list)
list-of-refs = first-ref *other-ref
first-ref = PKT-LINE(obj-id SP refname
NUL capability-list)
other-ref = PKT-LINE(other-tip / other-peeled)
other-tip = obj-id SP refname
other-peeled = obj-id SP refname "^{}"
shallow = PKT-LINE("shallow" SP obj-id)
capability-list = capability *(SP capability)
capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
LC_ALPHA = %x61-7A
----
Server and client MUST use lowercase for obj-id, both MUST treat obj-id
as case-insensitive.
See protocol-capabilities.txt for a list of allowed server capabilities
and descriptions.
Packfile Negotiation
--------------------
After reference and capabilities discovery, the client can decide to
terminate the connection by sending a flush-pkt, telling the server it can
now gracefully terminate, and disconnect, when it does not need any pack
data. This can happen with the ls-remote command, and also can happen when
the client already is up-to-date.
Otherwise, it enters the negotiation phase, where the client and
server determine what the minimal packfile necessary for transport is,
by telling the server what objects it wants, its shallow objects
(if any), and the maximum commit depth it wants (if any). The client
will also send a list of the capabilities it wants to be in effect,
out of what the server said it could do with the first 'want' line.
----
upload-request = want-list
*shallow-line
*1depth-request
flush-pkt
want-list = first-want
*additional-want
shallow-line = PKT-LINE("shallow" SP obj-id)
depth-request = PKT-LINE("deepen" SP depth) /
PKT-LINE("deepen-since" SP timestamp) /
PKT-LINE("deepen-not" SP ref)
first-want = PKT-LINE("want" SP obj-id SP capability-list)
additional-want = PKT-LINE("want" SP obj-id)
depth = 1*DIGIT
----
Clients MUST send all the obj-ids it wants from the reference
discovery phase as 'want' lines. Clients MUST send at least one
'want' command in the request body. Clients MUST NOT mention an
obj-id in a 'want' command which did not appear in the response
obtained through ref discovery.
The client MUST write all obj-ids which it only has shallow copies
of (meaning that it does not have the parents of a commit) as
'shallow' lines so that the server is aware of the limitations of
the client's history.
The client now sends the maximum commit history depth it wants for
this transaction, which is the number of commits it wants from the
tip of the history, if any, as a 'deepen' line. A depth of 0 is the
same as not making a depth request. The client does not want to receive
any commits beyond this depth, nor does it want objects needed only to
complete those commits. Commits whose parents are not received as a
result are defined as shallow and marked as such in the server. This
information is sent back to the client in the next step.
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
that it is done sending the list.
Otherwise, if the client sent a positive depth request, the server
will determine which commits will and will not be shallow and
send this information to the client. If the client did not request
a positive depth, this step is skipped.
----
shallow-update = *shallow-line
*unshallow-line
flush-pkt
shallow-line = PKT-LINE("shallow" SP obj-id)
unshallow-line = PKT-LINE("unshallow" SP obj-id)
----
If the client has requested a positive depth, the server will compute
the set of commits which are no deeper than the desired depth. The set
of commits start at the client's wants.
The server writes 'shallow' lines for each
commit whose parents will not be sent as a result. The server writes
an 'unshallow' line for each commit which the client has indicated is
shallow, but is no longer shallow at the currently requested depth
(that is, its parents will now be sent). The server MUST NOT mark
as unshallow anything which the client has not indicated was shallow.
Now the client will send a list of the obj-ids it has using 'have'
lines, so the server can make a packfile that only contains the objects
that the client needs. In multi_ack mode, the canonical implementation
will send up to 32 of these at a time, then will send a flush-pkt. The
canonical implementation will skip ahead and send the next 32 immediately,
so that there is always a block of 32 "in-flight on the wire" at a time.
----
upload-haves = have-list
compute-end
have-list = *have-line
have-line = PKT-LINE("have" SP obj-id)
compute-end = flush-pkt / PKT-LINE("done")
----
If the server reads 'have' lines, it then will respond by ACKing any
of the obj-ids the client said it had that the server also has. The
server will ACK obj-ids differently depending on which ack mode is
chosen by the client.
In multi_ack mode:
* the server will respond with 'ACK obj-id continue' for any common
commits.
* once the server has found an acceptable common base commit and is
ready to make a packfile, it will blindly ACK all 'have' obj-ids
back to the client.
* the server will then send a 'NAK' and then wait for another response
from the client - either a 'done' or another list of 'have' lines.
In multi_ack_detailed mode:
* the server will differentiate the ACKs where it is signaling
that it is ready to send data with 'ACK obj-id ready' lines, and
signals the identified common commits with 'ACK obj-id common' lines.
Without either multi_ack or multi_ack_detailed:
* upload-pack sends "ACK obj-id" on the first common object it finds.
After that it says nothing until the client gives it a "done".
* upload-pack sends "NAK" on a flush-pkt if no common object
has been found yet. If one has been found, and thus an ACK
was already sent, it's silent on the flush-pkt.
After the client has gotten enough ACK responses that it can determine
that the server has enough information to send an efficient packfile
(in the canonical implementation, this is determined when it has received
enough ACKs that it can color everything left in the --date-order queue
as common with the server, or the --date-order queue is empty), or the
client determines that it wants to give up (in the canonical implementation,
this is determined when the client sends 256 'have' lines without getting
any of them ACKed by the server - meaning there is nothing in common and
the server should just send all of its objects), then the client will send
a 'done' command. The 'done' command signals to the server that the client
is ready to receive its packfile data.
However, the 256 limit *only* turns on in the canonical client
implementation if we have received at least one "ACK %s continue"
during a prior round. This helps to ensure that at least one common
ancestor is found before we give up entirely.
Once the 'done' line is read from the client, the server will either
send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
name of the last commit determined to be common. The server only sends
ACK after 'done' if there is at least one common base and multi_ack or
multi_ack_detailed is enabled. The server always sends NAK after 'done'
if there is no common base found.
Then the server will start sending its packfile data.
----
server-response = *ack_multi ack / nak
ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
ack_status = "continue" / "common" / "ready"
ack = PKT-LINE("ACK" SP obj-id)
nak = PKT-LINE("NAK")
----
A simple clone may look like this (with no 'have' lines):
----
C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
side-band-64k ofs-delta\n
C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
C: 0000
C: 0009done\n
S: 0008NAK\n
S: [PACKFILE]
----
An incremental update (fetch) response might look like this:
----
C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
side-band-64k ofs-delta\n
C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
C: 0000
C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
C: [30 more have lines]
C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
C: 0000
S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
S: 0008NAK\n
C: 0009done\n
S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
S: [PACKFILE]
----
Packfile Data
-------------
Now that the client and server have finished negotiation about what
the minimal amount of data that needs to be sent to the client is, the server
will construct and send the required data in packfile format.
See pack-format.txt for what the packfile itself actually looks like.
If 'side-band' or 'side-band-64k' capabilities have been specified by
the client, the server will send the packfile data multiplexed.
Each packet starting with the packet-line length of the amount of data
that follows, followed by a single byte specifying the sideband the
following data is coming in on.
In 'side-band' mode, it will send up to 999 data bytes plus 1 control
code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
mode it will send up to 65519 data bytes plus 1 control code, for a
total of up to 65520 bytes in a pkt-line.
The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
packfile data, sideband '2' will be used for progress information that the
client will generally print to stderr and sideband '3' is used for error
information.
If no 'side-band' capability was specified, the server will stream the
entire packfile without multiplexing.
Pushing Data To a Server
------------------------
Pushing data to a server will invoke the 'receive-pack' process on the
server, which will allow the client to tell it which references it should
update and then send all the data the server will need for those new
references to be complete. Once all the data is received and validated,
the server will then update its references to what the client specified.
Authentication
--------------
The protocol itself contains no authentication mechanisms. That is to be
handled by the transport, such as SSH, before the 'receive-pack' process is
invoked. If 'receive-pack' is configured over the Git transport, those
repositories will be writable by anyone who can access that port (9418) as
that transport is unauthenticated.
Reference Discovery
-------------------
The reference discovery phase is done nearly the same way as it is in the
fetching protocol. Each reference obj-id and name on the server is sent
in packet-line format to the client, followed by a flush-pkt. The only
real difference is that the capability listing is different - the only
possible values are 'report-status', 'delete-refs', 'ofs-delta' and
'push-options'.
Reference Update Request and Packfile Transfer
----------------------------------------------
Once the client knows what references the server is at, it can send a
list of reference update requests. For each reference on the server
that it wants to update, it sends a line listing the obj-id currently on
the server, the obj-id the client would like to update it to and the name
of the reference.
This list is followed by a flush-pkt. Then the push options are transmitted
one per packet followed by another flush-pkt. After that the packfile that
should contain all the objects that the server will need to complete the new
references will be sent.
----
update-request = *shallow ( command-list | push-cert ) [packfile]
shallow = PKT-LINE("shallow" SP obj-id)
command-list = PKT-LINE(command NUL capability-list)
*PKT-LINE(command)
flush-pkt
command = create / delete / update
create = zero-id SP new-id SP name
delete = old-id SP zero-id SP name
update = old-id SP new-id SP name
old-id = obj-id
new-id = obj-id
push-cert = PKT-LINE("push-cert" NUL capability-list LF)
PKT-LINE("certificate version 0.1" LF)
PKT-LINE("pusher" SP ident LF)
PKT-LINE("pushee" SP url LF)
PKT-LINE("nonce" SP nonce LF)
PKT-LINE(LF)
*PKT-LINE(command LF)
*PKT-LINE(gpg-signature-lines LF)
PKT-LINE("push-cert-end" LF)
packfile = "PACK" 28*(OCTET)
----
If the receiving end does not support delete-refs, the sending end MUST
NOT ask for delete command.
If the receiving end does not support push-cert, the sending end
MUST NOT send a push-cert command. When a push-cert command is
sent, command-list MUST NOT be sent; the commands recorded in the
push certificate are used instead.
The packfile MUST NOT be sent if the only command used is 'delete'.
A packfile MUST be sent if either create or update command is used,
even if the server already has all the necessary objects. In this
case the client MUST send an empty packfile. The only time this
is likely to happen is if the client is creating
a new branch or a tag that points to an existing obj-id.
The server will receive the packfile, unpack it, then validate each
reference that is being updated that it hasn't changed while the request
was being processed (the obj-id is still the same as the old-id), and
it will run any update hooks to make sure that the update is acceptable.
If all of that is fine, the server will then update the references.
Push Certificate
----------------
A push certificate begins with a set of header lines. After the
header and an empty line, the protocol commands follow, one per
line. Note that the trailing LF in push-cert PKT-LINEs is _not_
optional; it must be present.
Currently, the following header fields are defined:
`pusher` ident::
Identify the GPG key in "Human Readable Name <email@address>"
format.
`pushee` url::
The repository URL (anonymized, if the URL contains
authentication material) the user who ran `git push`
intended to push into.
`nonce` nonce::
The 'nonce' string the receiving repository asked the
pushing user to include in the certificate, to prevent
replay attacks.
The GPG signature lines are a detached signature for the contents
recorded in the push certificate before the signature block begins.
The detached signature is used to certify that the commands were
given by the pusher, who must be the signer.
Report Status
-------------
After receiving the pack data from the sender, the receiver sends a
report if 'report-status' capability is in effect.
It is a short listing of what happened in that update. It will first
list the status of the packfile unpacking as either 'unpack ok' or
'unpack [error]'. Then it will list the status for each of the references
that it tried to update. Each line is either 'ok [refname]' if the
update was successful, or 'ng [refname] [error]' if the update was not.
----
report-status = unpack-status
1*(command-status)
flush-pkt
unpack-status = PKT-LINE("unpack" SP unpack-result)
unpack-result = "ok" / error-msg
command-status = command-ok / command-fail
command-ok = PKT-LINE("ok" SP refname)
command-fail = PKT-LINE("ng" SP refname SP error-msg)
error-msg = 1*(OCTET) ; where not "ok"
----
Updates can be unsuccessful for a number of reasons. The reference can have
changed since the reference discovery phase was originally sent, meaning
someone pushed in the meantime. The reference being pushed could be a
non-fast-forward reference and the update hooks or configuration could be
set to not allow that, etc. Also, some references can be updated while others
can be rejected.
An example client/server communication might look like this:
----
S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
S: 0000
C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
C: 0000
C: [PACKDATA]
S: 000eunpack ok\n
S: 0018ok refs/heads/debug\n
S: 002ang refs/heads/master non-fast-forward\n
----
*/
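
To make the upload-request grammar above concrete, here is a minimal sketch (not part of this commit; the hashes and capability names are taken from the examples in the specification) that produces the first want lines of a fetch using the pktline encoder:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	// first-want carries the capability list; additional wants do not.
	e.Encodef("want %s %s\n", "74730d410fcb6603ace96f1dc55ea6196122532d",
		"multi_ack side-band-64k ofs-delta")
	e.Encodef("want %s\n", "7d1665144a3a975c05f1f43902ddaf084e784dbe")
	e.Flush() // the flush-pkt ("0000") ends the want list
	fmt.Print(buf.String())
	// 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack side-band-64k ofs-delta
	// 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe
	// 0000
}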

View File

@ -0,0 +1,165 @@
package packp
import (
"bytes"
"fmt"
"io"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
const (
ok = "ok"
)
// ReportStatus is a report status message, as used in the git-receive-pack
// process whenever the 'report-status' capability is negotiated.
type ReportStatus struct {
UnpackStatus string
CommandStatuses []*CommandStatus
}
// NewReportStatus creates a new ReportStatus message.
func NewReportStatus() *ReportStatus {
return &ReportStatus{}
}
// Error returns the first error if any.
func (s *ReportStatus) Error() error {
if s.UnpackStatus != ok {
return fmt.Errorf("unpack error: %s", s.UnpackStatus)
}
for _, s := range s.CommandStatuses {
if err := s.Error(); err != nil {
return err
}
}
return nil
}
// Encode writes the report status to a writer.
func (s *ReportStatus) Encode(w io.Writer) error {
e := pktline.NewEncoder(w)
if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil {
return err
}
for _, cs := range s.CommandStatuses {
if err := cs.encode(w); err != nil {
return err
}
}
return e.Flush()
}
// Decode reads from the given reader and decodes a report-status message. It
// does not read more input than what is needed to fill the report status.
func (s *ReportStatus) Decode(r io.Reader) error {
scan := pktline.NewScanner(r)
if err := s.scanFirstLine(scan); err != nil {
return err
}
if err := s.decodeReportStatus(scan.Bytes()); err != nil {
return err
}
flushed := false
for scan.Scan() {
b := scan.Bytes()
if isFlush(b) {
flushed = true
break
}
if err := s.decodeCommandStatus(b); err != nil {
return err
}
}
if !flushed {
return fmt.Errorf("missing flush")
}
return scan.Err()
}
func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error {
if scan.Scan() {
return nil
}
if scan.Err() != nil {
return scan.Err()
}
return io.ErrUnexpectedEOF
}
func (s *ReportStatus) decodeReportStatus(b []byte) error {
if isFlush(b) {
return fmt.Errorf("premature flush")
}
b = bytes.TrimSuffix(b, eol)
line := string(b)
fields := strings.SplitN(line, " ", 2)
if len(fields) != 2 || fields[0] != "unpack" {
return fmt.Errorf("malformed unpack status: %s", line)
}
s.UnpackStatus = fields[1]
return nil
}
func (s *ReportStatus) decodeCommandStatus(b []byte) error {
b = bytes.TrimSuffix(b, eol)
line := string(b)
fields := strings.SplitN(line, " ", 3)
status := ok
if len(fields) == 3 && fields[0] == "ng" {
status = fields[2]
} else if len(fields) != 2 || fields[0] != "ok" {
return fmt.Errorf("malformed command status: %s", line)
}
cs := &CommandStatus{
ReferenceName: plumbing.ReferenceName(fields[1]),
Status: status,
}
s.CommandStatuses = append(s.CommandStatuses, cs)
return nil
}
// CommandStatus is the status of a reference in a report status.
// See ReportStatus struct.
type CommandStatus struct {
ReferenceName plumbing.ReferenceName
Status string
}
// Error returns the error, if any.
func (s *CommandStatus) Error() error {
if s.Status == ok {
return nil
}
return fmt.Errorf("command error on %s: %s",
s.ReferenceName.String(), s.Status)
}
func (s *CommandStatus) encode(w io.Writer) error {
e := pktline.NewEncoder(w)
if s.Error() == nil {
return e.Encodef("ok %s\n", s.ReferenceName.String())
}
return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status)
}
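
A usage sketch (not part of the commit; the reference names and statuses are invented) showing how a report is encoded and how Error() surfaces the first failure:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	rs := packp.NewReportStatus()
	rs.UnpackStatus = "ok"
	rs.CommandStatuses = []*packp.CommandStatus{
		{ReferenceName: plumbing.ReferenceName("refs/heads/master"), Status: "ok"},
		{ReferenceName: plumbing.ReferenceName("refs/heads/debug"), Status: "non-fast-forward"},
	}

	var buf bytes.Buffer
	if err := rs.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String())
	fmt.Println(rs.Error()) // command error on refs/heads/debug: non-fast-forward
}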

View File

@ -0,0 +1,92 @@
package packp
import (
"bytes"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
const (
shallowLineLen = 48
unshallowLineLen = 50
)
type ShallowUpdate struct {
Shallows []plumbing.Hash
Unshallows []plumbing.Hash
}
func (r *ShallowUpdate) Decode(reader io.Reader) error {
s := pktline.NewScanner(reader)
for s.Scan() {
line := s.Bytes()
line = bytes.TrimSpace(line)
var err error
switch {
case bytes.HasPrefix(line, shallow):
err = r.decodeShallowLine(line)
case bytes.HasPrefix(line, unshallow):
err = r.decodeUnshallowLine(line)
case bytes.Equal(line, pktline.Flush):
return nil
}
if err != nil {
return err
}
}
return s.Err()
}
func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
hash, err := r.decodeLine(line, shallow, shallowLineLen)
if err != nil {
return err
}
r.Shallows = append(r.Shallows, hash)
return nil
}
func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
hash, err := r.decodeLine(line, unshallow, unshallowLineLen)
if err != nil {
return err
}
r.Unshallows = append(r.Unshallows, hash)
return nil
}
func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
if len(line) != expLen {
return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
}
raw := string(line[expLen-40 : expLen])
return plumbing.NewHash(raw), nil
}
func (r *ShallowUpdate) Encode(w io.Writer) error {
e := pktline.NewEncoder(w)
for _, h := range r.Shallows {
if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil {
return err
}
}
for _, h := range r.Unshallows {
if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil {
return err
}
}
return e.Flush()
}
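
A decoding sketch (not part of the commit; the hashes are arbitrary) that feeds a hand-built shallow-update message through Decode:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	e.Encodef("shallow %s\n", "1d3fcd5ced445d1abc402225c0b8a1299641f497")
	e.Encodef("unshallow %s\n", "7217a7c7e582c46cec22a130adf4b9d7d950fba0")
	e.Flush()

	var su packp.ShallowUpdate
	if err := su.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(su.Shallows[0], su.Unshallows[0])
}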

View File

@ -0,0 +1,33 @@
package sideband
// Type is the sideband type: "side-band" or "side-band-64k"
type Type int8
const (
// Sideband legacy sideband type up to 1000-byte messages
Sideband Type = iota
// Sideband64k sideband type up to 65519-byte messages
Sideband64k Type = iota
// MaxPackedSize for Sideband type
MaxPackedSize = 1000
// MaxPackedSize64k for Sideband64k type
MaxPackedSize64k = 65520
)
// Channel sideband channel
type Channel byte
// WithPayload encodes the payload as a message
func (ch Channel) WithPayload(payload []byte) []byte {
return append([]byte{byte(ch)}, payload...)
}
const (
// PackData packfile content
PackData Channel = 1
// ProgressMessage progress messages
ProgressMessage Channel = 2
// ErrorMessage fatal error message just before stream aborts
ErrorMessage Channel = 3
)

View File

@ -0,0 +1,148 @@
package sideband
import (
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// ErrMaxPackedExceeded is returned by Read if the maximum packed size is exceeded
var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")
// Progress is where the progress information is stored
type Progress interface {
io.Writer
}
// Demuxer demultiplexes the progress reports and error info interleaved with the
// packfile itself.
//
// A sideband has three different channels: the main one, called PackData,
// contains the packfile data; the ErrorMessage channel contains server
// errors; and the last one, the ProgressMessage channel, carries information
// about the ongoing task happening in the server (optional, can be suppressed
// by sending the NoProgress or Quiet capabilities to the server).
//
// In order to demultiplex the data stream, the `Read` method should be called
// to retrieve the PackData channel; the incoming data from the ProgressMessage
// channel is written to `Progress` (if any). If any message is retrieved from
// the ErrorMessage channel, an error is returned and we can assume that the
// connection has been closed.
type Demuxer struct {
t Type
r io.Reader
s *pktline.Scanner
max int
pending []byte
// Progress is where the progress messages are stored
Progress Progress
}
// NewDemuxer returns a new Demuxer for the given t that reads from r
func NewDemuxer(t Type, r io.Reader) *Demuxer {
max := MaxPackedSize64k
if t == Sideband {
max = MaxPackedSize
}
return &Demuxer{
t: t,
r: r,
max: max,
s: pktline.NewScanner(r),
}
}
// Read reads up to len(b) bytes from the PackData channel into b. An error
// can be returned if an error happens while reading or if a message is sent
// in the ErrorMessage channel.
//
// When a ProgressMessage is read, it is not copied to b; instead, it is
// written to the Progress writer.
func (d *Demuxer) Read(b []byte) (n int, err error) {
var read, req int
req = len(b)
for read < req {
n, err := d.doRead(b[read:req])
read += n
if err != nil {
return read, err
}
}
return read, nil
}
func (d *Demuxer) doRead(b []byte) (int, error) {
read, err := d.nextPackData()
size := len(read)
wanted := len(b)
if size > wanted {
d.pending = read[wanted:]
}
if wanted > size {
wanted = size
}
size = copy(b, read[:wanted])
return size, err
}
func (d *Demuxer) nextPackData() ([]byte, error) {
content := d.getPending()
if len(content) != 0 {
return content, nil
}
if !d.s.Scan() {
if err := d.s.Err(); err != nil {
return nil, err
}
return nil, io.EOF
}
content = d.s.Bytes()
size := len(content)
if size == 0 {
return nil, nil
} else if size > d.max {
return nil, ErrMaxPackedExceeded
}
switch Channel(content[0]) {
case PackData:
return content[1:], nil
case ProgressMessage:
if d.Progress != nil {
_, err := d.Progress.Write(content[1:])
return nil, err
}
case ErrorMessage:
return nil, fmt.Errorf("unexpected error: %s", content[1:])
default:
return nil, fmt.Errorf("unknown channel %s", content)
}
return nil, nil
}
func (d *Demuxer) getPending() (b []byte) {
if len(d.pending) == 0 {
return nil
}
content := d.pending
d.pending = nil
return content
}
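
A demuxing sketch (not part of the commit; the payloads are invented) showing both the happy path and how an ErrorMessage packet surfaces as an error from Read:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	// One PackData packet followed by a fatal error on channel 3.
	e.Encode(sideband.PackData.WithPayload([]byte("PACK")))
	e.Encode(sideband.ErrorMessage.WithPayload([]byte("access denied")))

	d := sideband.NewDemuxer(sideband.Sideband64k, &buf)
	p := make([]byte, 4)
	n, err := d.Read(p)
	fmt.Printf("%q %v\n", p[:n], err) // "PACK" <nil>
	_, err = d.Read(p)
	fmt.Println(err) // unexpected error: access denied
}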

View File

@ -0,0 +1,31 @@
// Package sideband implements a sideband multiplexer/demultiplexer
package sideband
// If 'side-band' or 'side-band-64k' capabilities have been specified by
// the client, the server will send the packfile data multiplexed.
//
// Either mode indicates that the packfile data will be streamed broken
// up into packets of up to either 1000 bytes in the case of 'side_band',
// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
// of a leading 4-byte pkt-line length of how much data is in the packet,
// followed by a 1-byte stream code, followed by the actual data.
//
// The stream code can be one of:
//
// 1 - pack data
// 2 - progress messages
// 3 - fatal error message just before stream aborts
//
// The "side-band-64k" capability came about as a way for newer clients
// that can handle much larger packets to request packets that are
// actually crammed nearly full, while maintaining backward compatibility
// for the older clients.
//
// Further, with side-band and its up to 1000-byte messages, it's actually
// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
// same deal, you have up to 65519 bytes of data and 1 byte for the stream
// code.
//
// The client MUST send at most one of "side-band" and "side-band-64k".
// The server MUST diagnose it as an error if the client requests both.

View File

@ -0,0 +1,65 @@
package sideband
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Muxer multiplexes the packfile along with the progress messages and the
// error information. The multiplexing is performed using the pkt-line format.
type Muxer struct {
max int
e *pktline.Encoder
}
const chLen = 1
// NewMuxer returns a new Muxer for the given t that writes on w.
//
// If t is equal to `Sideband`, the max pack size is set to MaxPackedSize; if
// any other value is given, the max pack size is set to MaxPackedSize64k,
// which is the maximum length of a line in pkt-line format.
func NewMuxer(t Type, w io.Writer) *Muxer {
max := MaxPackedSize64k
if t == Sideband {
max = MaxPackedSize
}
return &Muxer{
max: max - chLen,
e: pktline.NewEncoder(w),
}
}
// Write writes p in the PackData channel
func (m *Muxer) Write(p []byte) (int, error) {
return m.WriteChannel(PackData, p)
}
// WriteChannel writes p in the given channel. This method can be used with
// any channel, but it is recommended to use it only for the ProgressMessage
// and ErrorMessage channels, and to use Write for the PackData channel.
func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
wrote := 0
size := len(p)
for wrote < size {
n, err := m.doWrite(t, p[wrote:])
wrote += n
if err != nil {
return wrote, err
}
}
return wrote, nil
}
func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
sz := len(p)
if sz > m.max {
sz = m.max
}
return sz, m.e.Encode(ch.WithPayload(p[:sz]))
}
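
A round-trip sketch (not part of the commit; the message contents are invented): the Muxer interleaves progress with pack data, and a Demuxer on the other side separates them again:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
)

func main() {
	var wire bytes.Buffer
	mux := sideband.NewMuxer(sideband.Sideband64k, &wire)
	mux.WriteChannel(sideband.ProgressMessage, []byte("Counting objects: 3, done.\n"))
	mux.Write([]byte("PACK...")) // packfile bytes go to the PackData channel

	var progress bytes.Buffer
	demux := sideband.NewDemuxer(sideband.Sideband64k, &wire)
	demux.Progress = &progress

	pack, _ := ioutil.ReadAll(demux)
	fmt.Printf("pack: %q\n", pack)                  // pack: "PACK..."
	fmt.Printf("progress: %q\n", progress.String()) // the channel-2 message
}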

View File

@ -0,0 +1,127 @@
package packp
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
const ackLineLen = 44
// ServerResponse is the object acknowledgement from the upload-pack service
type ServerResponse struct {
ACKs []plumbing.Hash
}
// Decode decodes the response into the struct; isMultiACK should be true if
// the request was done with the multi_ack or multi_ack_detailed capability.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
line := s.Bytes()
if err := r.decodeLine(line); err != nil {
return err
}
// we need to detect when the end of a response header and the beginning
// of a packfile header happen; some requests to the git daemon
// produce a duplicate ACK header even when multi_ack is not supported.
stop, err := r.stopReading(reader)
if err != nil {
return err
}
if stop {
break
}
}
return s.Err()
}
// stopReading peeks ahead to check whether a valid command such as ACK or
// NAK follows in the buffer, without moving the read pointer; it reports
// true when reading should stop.
func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
ahead, err := reader.Peek(7)
if err == io.EOF {
return true, nil
}
if err != nil {
return false, err
}
if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
return false, nil
}
if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
return false, nil
}
return true, nil
}
func (r *ServerResponse) isValidCommand(b []byte) bool {
commands := [][]byte{ack, nak}
for _, c := range commands {
if bytes.Equal(b, c) {
return true
}
}
return false
}
func (r *ServerResponse) decodeLine(line []byte) error {
if len(line) == 0 {
return fmt.Errorf("unexpected flush")
}
if bytes.Equal(line[0:3], ack) {
return r.decodeACKLine(line)
}
if bytes.Equal(line[0:3], nak) {
return nil
}
return fmt.Errorf("unexpected content %q", string(line))
}
func (r *ServerResponse) decodeACKLine(line []byte) error {
if len(line) < ackLineLen {
return fmt.Errorf("malformed ACK %q", line)
}
sp := bytes.Index(line, []byte(" "))
h := plumbing.NewHash(string(line[sp+1 : sp+41]))
r.ACKs = append(r.ACKs, h)
return nil
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
e := pktline.NewEncoder(w)
if len(r.ACKs) == 0 {
return e.Encodef("%s\n", nak)
}
return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
}
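
A decoding sketch (not part of the commit; the hash is arbitrary): a single ACK followed by end-of-stream, as in the non-multi_ack case:

package main

import (
	"bufio"
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	e.Encodef("ACK %s\n", "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	var r packp.ServerResponse
	if err := r.Decode(bufio.NewReader(&buf), false); err != nil {
		panic(err)
	}
	fmt.Println(r.ACKs[0]) // 6ecf0ef2c2dffb796033e5a02219af86ec6584e5
}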

View File

@ -0,0 +1,168 @@
package packp
import (
"fmt"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// UploadRequest values represent the information transmitted on an
// upload-request message. Values from this type are not zero-value
// safe, use the New function instead.
// This is a low level type, use UploadPackRequest instead.
type UploadRequest struct {
Capabilities *capability.List
Wants []plumbing.Hash
Shallows []plumbing.Hash
Depth Depth
}
// Depth values store the desired depth of the requested packfile: see
// DepthCommits, DepthSince and DepthReference.
type Depth interface {
isDepth()
IsZero() bool
}
// DepthCommits values store the maximum number of requested commits in
// the packfile. Zero means infinite. A negative value will have
// undefined consequences.
type DepthCommits int
func (d DepthCommits) isDepth() {}
func (d DepthCommits) IsZero() bool {
return d == 0
}
// DepthSince values requests only commits newer than the specified time.
type DepthSince time.Time
func (d DepthSince) isDepth() {}
func (d DepthSince) IsZero() bool {
return time.Time(d).IsZero()
}
// DepthReference requests only commits not found in the specified reference.
type DepthReference string
func (d DepthReference) isDepth() {}
func (d DepthReference) IsZero() bool {
return string(d) == ""
}
// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be
// used. It has no capabilities, wants or shallows and an infinite depth. Please
// note that to encode an upload-request it has to have at least one wanted hash.
func NewUploadRequest() *UploadRequest {
return &UploadRequest{
Capabilities: capability.NewList(),
Wants: []plumbing.Hash{},
Shallows: []plumbing.Hash{},
Depth: DepthCommits(0),
}
}
// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
// value, with the request capabilities filled with the most optimal ones,
// based on the adv value (advertised capabilities); the generated
// UploadRequest has no wants or shallows and an infinite depth.
func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
r := NewUploadRequest()
if adv.Supports(capability.MultiACKDetailed) {
r.Capabilities.Set(capability.MultiACKDetailed)
} else if adv.Supports(capability.MultiACK) {
r.Capabilities.Set(capability.MultiACK)
}
if adv.Supports(capability.Sideband64k) {
r.Capabilities.Set(capability.Sideband64k)
} else if adv.Supports(capability.Sideband) {
r.Capabilities.Set(capability.Sideband)
}
if adv.Supports(capability.ThinPack) {
r.Capabilities.Set(capability.ThinPack)
}
if adv.Supports(capability.OFSDelta) {
r.Capabilities.Set(capability.OFSDelta)
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
}
return r
}
// Validate validates the content of UploadRequest, following these rules:
// - Wants MUST have at least one reference
// - capability.Shallow MUST be present if Shallows is not empty
// - if a non-zero DepthCommits is given, capability.Shallow MUST be present
// - if a DepthSince is given, capability.DeepenSince MUST be present
// - if a DepthReference is given, capability.DeepenNot MUST be present
// - MUST contain at most one of capability.Sideband and capability.Sideband64k
// - MUST contain at most one of capability.MultiACK and capability.MultiACKDetailed
func (r *UploadRequest) Validate() error {
if len(r.Wants) == 0 {
return fmt.Errorf("want can't be empty")
}
if err := r.validateRequiredCapabilities(); err != nil {
return err
}
if err := r.validateConflictCapabilities(); err != nil {
return err
}
return nil
}
func (r *UploadRequest) validateRequiredCapabilities() error {
msg := "missing capability %s"
if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) {
return fmt.Errorf(msg, capability.Shallow)
}
switch r.Depth.(type) {
case DepthCommits:
if r.Depth != DepthCommits(0) {
if !r.Capabilities.Supports(capability.Shallow) {
return fmt.Errorf(msg, capability.Shallow)
}
}
case DepthSince:
if !r.Capabilities.Supports(capability.DeepenSince) {
return fmt.Errorf(msg, capability.DeepenSince)
}
case DepthReference:
if !r.Capabilities.Supports(capability.DeepenNot) {
return fmt.Errorf(msg, capability.DeepenNot)
}
}
return nil
}
func (r *UploadRequest) validateConflictCapabilities() error {
msg := "capabilities %s and %s are mutually exclusive"
if r.Capabilities.Supports(capability.Sideband) &&
r.Capabilities.Supports(capability.Sideband64k) {
return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
}
if r.Capabilities.Supports(capability.MultiACK) &&
r.Capabilities.Supports(capability.MultiACKDetailed) {
return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
}
return nil
}
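
A usage sketch (not part of the commit; the advertised set, the agent value and the hash are invented) that derives a request from advertised capabilities and validates it:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)

func main() {
	adv := capability.NewList()
	adv.Add(capability.MultiACKDetailed)
	adv.Add(capability.Sideband64k)
	adv.Add(capability.ThinPack)
	adv.Add(capability.OFSDelta)
	adv.Add(capability.Agent, "git/2.20.1")

	req := packp.NewUploadRequestFromCapabilities(adv)
	req.Wants = append(req.Wants,
		plumbing.NewHash("1d3fcd5ced445d1abc402225c0b8a1299641f497"))

	fmt.Println(req.Validate())            // <nil>
	fmt.Println(req.Capabilities.String()) // multi_ack_detailed side-band-64k thin-pack ofs-delta agent=go-git/4.x
}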

View File

@ -0,0 +1,257 @@
package packp
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Decode reads the next upload-request from its input and
// stores it in the UploadRequest.
func (u *UploadRequest) Decode(r io.Reader) error {
d := newUlReqDecoder(r)
return d.Decode(u)
}
type ulReqDecoder struct {
s *pktline.Scanner // a pkt-line scanner from the input stream
line []byte // current pkt-line contents, use parser.nextLine() to make it advance
nLine int // current pkt-line number for debugging, begins at 1
err error // sticky error, use the parser.error() method to fill this out
data *UploadRequest // parsed data is stored here
}
func newUlReqDecoder(r io.Reader) *ulReqDecoder {
return &ulReqDecoder{
s: pktline.NewScanner(r),
}
}
func (d *ulReqDecoder) Decode(v *UploadRequest) error {
d.data = v
for state := d.decodeFirstWant; state != nil; {
state = state()
}
return d.err
}
// fills out the parser sticky error
func (d *ulReqDecoder) error(format string, a ...interface{}) {
msg := fmt.Sprintf(
"pkt-line %d: %s", d.nLine,
fmt.Sprintf(format, a...),
)
d.err = NewErrUnexpectedData(msg, d.line)
}
// Reads a new pkt-line from the scanner, makes its payload available as
// d.line and increments d.nLine. A successful invocation returns true,
// otherwise, false is returned and the sticky error is filled out
// accordingly. Trims eols at the end of the payloads.
func (d *ulReqDecoder) nextLine() bool {
d.nLine++
if !d.s.Scan() {
if d.err = d.s.Err(); d.err != nil {
return false
}
d.error("EOF")
return false
}
d.line = d.s.Bytes()
d.line = bytes.TrimSuffix(d.line, eol)
return true
}
// Expected format: want <hash>[ capabilities]
func (d *ulReqDecoder) decodeFirstWant() stateFn {
if ok := d.nextLine(); !ok {
return nil
}
if !bytes.HasPrefix(d.line, want) {
d.error("missing 'want ' prefix")
return nil
}
d.line = bytes.TrimPrefix(d.line, want)
hash, ok := d.readHash()
if !ok {
return nil
}
d.data.Wants = append(d.data.Wants, hash)
return d.decodeCaps
}
func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
if len(d.line) < hashSize {
d.err = fmt.Errorf("malformed hash: %v", d.line)
return plumbing.ZeroHash, false
}
var hash plumbing.Hash
if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
d.error("invalid hash text: %s", err)
return plumbing.ZeroHash, false
}
d.line = d.line[hashSize:]
return hash, true
}
// Expected format: sp cap1 sp cap2 sp cap3...
func (d *ulReqDecoder) decodeCaps() stateFn {
d.line = bytes.TrimPrefix(d.line, sp)
if err := d.data.Capabilities.Decode(d.line); err != nil {
d.error("invalid capabilities: %s", err)
}
return d.decodeOtherWants
}
// Expected format: want <hash>
func (d *ulReqDecoder) decodeOtherWants() stateFn {
if ok := d.nextLine(); !ok {
return nil
}
if bytes.HasPrefix(d.line, shallow) {
return d.decodeShallow
}
if bytes.HasPrefix(d.line, deepen) {
return d.decodeDeepen
}
if len(d.line) == 0 {
return nil
}
if !bytes.HasPrefix(d.line, want) {
d.error("unexpected payload while expecting a want: %q", d.line)
return nil
}
d.line = bytes.TrimPrefix(d.line, want)
hash, ok := d.readHash()
if !ok {
return nil
}
d.data.Wants = append(d.data.Wants, hash)
return d.decodeOtherWants
}
// Expected format: shallow <hash>
func (d *ulReqDecoder) decodeShallow() stateFn {
if bytes.HasPrefix(d.line, deepen) {
return d.decodeDeepen
}
if len(d.line) == 0 {
return nil
}
if !bytes.HasPrefix(d.line, shallow) {
d.error("unexpected payload while expecting a shallow: %q", d.line)
return nil
}
d.line = bytes.TrimPrefix(d.line, shallow)
hash, ok := d.readHash()
if !ok {
return nil
}
d.data.Shallows = append(d.data.Shallows, hash)
if ok := d.nextLine(); !ok {
return nil
}
return d.decodeShallow
}
// Expected format: deepen <n> / deepen-since <ul> / deepen-not <ref>
func (d *ulReqDecoder) decodeDeepen() stateFn {
if bytes.HasPrefix(d.line, deepenCommits) {
return d.decodeDeepenCommits
}
if bytes.HasPrefix(d.line, deepenSince) {
return d.decodeDeepenSince
}
if bytes.HasPrefix(d.line, deepenReference) {
return d.decodeDeepenReference
}
if len(d.line) == 0 {
return nil
}
d.error("unexpected deepen specification: %q", d.line)
return nil
}
func (d *ulReqDecoder) decodeDeepenCommits() stateFn {
d.line = bytes.TrimPrefix(d.line, deepenCommits)
var n int
if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
return nil
}
if n < 0 {
d.err = fmt.Errorf("negative depth")
return nil
}
d.data.Depth = DepthCommits(n)
return d.decodeFlush
}
func (d *ulReqDecoder) decodeDeepenSince() stateFn {
d.line = bytes.TrimPrefix(d.line, deepenSince)
var secs int64
secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
if d.err != nil {
return nil
}
t := time.Unix(secs, 0).UTC()
d.data.Depth = DepthSince(t)
return d.decodeFlush
}
func (d *ulReqDecoder) decodeDeepenReference() stateFn {
d.line = bytes.TrimPrefix(d.line, deepenReference)
d.data.Depth = DepthReference(string(d.line))
return d.decodeFlush
}
func (d *ulReqDecoder) decodeFlush() stateFn {
if ok := d.nextLine(); !ok {
return nil
}
if len(d.line) != 0 {
d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
}
return nil
}
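
A hedged sketch of the decoder in use. The raw string below hand-rolls two pkt-lines: a want line with its "0032" length prefix (4 hex digits that include the prefix itself) and the terminating flush-pkt ("0000"); the hash is illustrative.

package main

import (
	"fmt"
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	raw := "0032want 6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n0000"

	req := packp.NewUploadRequest()
	if err := req.Decode(strings.NewReader(raw)); err != nil {
		panic(err)
	}
	fmt.Println(req.Wants) // [6ecf0ef2c2dffb796033e5a02219af86ec6584e5]
}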

View File

@ -0,0 +1,145 @@
package packp
import (
"bytes"
"fmt"
"io"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Encode writes the UlReq encoding of u to the stream.
//
// All the payloads will end with a newline character. Wants and
// shallows are sorted alphabetically. A depth of 0 means no depth
// request is sent.
func (u *UploadRequest) Encode(w io.Writer) error {
e := newUlReqEncoder(w)
return e.Encode(u)
}
type ulReqEncoder struct {
pe *pktline.Encoder // where to write the encoded data
data *UploadRequest // the data to encode
err error // sticky error
}
func newUlReqEncoder(w io.Writer) *ulReqEncoder {
return &ulReqEncoder{
pe: pktline.NewEncoder(w),
}
}
func (e *ulReqEncoder) Encode(v *UploadRequest) error {
e.data = v
if len(v.Wants) == 0 {
return fmt.Errorf("empty wants provided")
}
plumbing.HashesSort(e.data.Wants)
for state := e.encodeFirstWant; state != nil; {
state = state()
}
return e.err
}
func (e *ulReqEncoder) encodeFirstWant() stateFn {
var err error
if e.data.Capabilities.IsEmpty() {
err = e.pe.Encodef("want %s\n", e.data.Wants[0])
} else {
err = e.pe.Encodef(
"want %s %s\n",
e.data.Wants[0],
e.data.Capabilities.String(),
)
}
if err != nil {
e.err = fmt.Errorf("encoding first want line: %s", err)
return nil
}
return e.encodeAdditionalWants
}
func (e *ulReqEncoder) encodeAdditionalWants() stateFn {
last := e.data.Wants[0]
for _, w := range e.data.Wants[1:] {
if bytes.Equal(last[:], w[:]) {
continue
}
if err := e.pe.Encodef("want %s\n", w); err != nil {
e.err = fmt.Errorf("encoding want %q: %s", w, err)
return nil
}
last = w
}
return e.encodeShallows
}
func (e *ulReqEncoder) encodeShallows() stateFn {
plumbing.HashesSort(e.data.Shallows)
var last plumbing.Hash
for _, s := range e.data.Shallows {
if bytes.Equal(last[:], s[:]) {
continue
}
if err := e.pe.Encodef("shallow %s\n", s); err != nil {
e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
return nil
}
last = s
}
return e.encodeDepth
}
func (e *ulReqEncoder) encodeDepth() stateFn {
switch depth := e.data.Depth.(type) {
case DepthCommits:
if depth != 0 {
commits := int(depth)
if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
return nil
}
}
case DepthSince:
when := time.Time(depth).UTC()
if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
e.err = fmt.Errorf("encoding depth %s: %s", when, err)
return nil
}
case DepthReference:
reference := string(depth)
if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
return nil
}
default:
e.err = fmt.Errorf("unsupported depth type")
return nil
}
return e.encodeFlush
}
func (e *ulReqEncoder) encodeFlush() stateFn {
if err := e.pe.Flush(); err != nil {
e.err = fmt.Errorf("encoding flush-pkt: %s", err)
return nil
}
return nil
}
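
The encoder side, as a hedged sketch (the hash is illustrative): a depth of one commit produces a "deepen 1" pkt-line after the wants, followed by the flush-pkt.

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	req := packp.NewUploadRequest()
	req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
	req.Depth = packp.DepthCommits(1) // encoded as a "deepen 1" pkt-line

	var buf bytes.Buffer
	if err := req.Encode(&buf); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", buf.String()) // want line, deepen line, flush-pkt
}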

View File

@ -0,0 +1,122 @@
package packp
import (
"errors"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
)
var (
ErrEmptyCommands = errors.New("commands cannot be empty")
ErrMalformedCommand = errors.New("malformed command")
)
// ReferenceUpdateRequest values represent reference update requests.
// Values from this type are not zero-value safe, use the New function instead.
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
// Progress receives sideband progress messages from the server
Progress sideband.Progress
}
// NewReferenceUpdateRequest returns a pointer to a new ReferenceUpdateRequest value.
func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
return &ReferenceUpdateRequest{
// TODO: Add support for push-cert
Capabilities: capability.NewList(),
Commands: nil,
}
}
// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new
// ReferenceUpdateRequest value. The request capabilities are filled with the
// most optimal ones, based on the adv value (advertised capabilities). The
// returned ReferenceUpdateRequest contains no commands.
//
// It does set the following capabilities:
// - agent
// - report-status
// It leaves up to the user to add the following capabilities later:
// - atomic
// - ofs-delta
// - side-band
// - side-band-64k
// - quiet
// - push-cert
func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest {
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
}
if adv.Supports(capability.ReportStatus) {
r.Capabilities.Set(capability.ReportStatus)
}
return r
}
func (r *ReferenceUpdateRequest) validate() error {
if len(r.Commands) == 0 {
return ErrEmptyCommands
}
for _, c := range r.Commands {
if err := c.validate(); err != nil {
return err
}
}
return nil
}
type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
)
type Command struct {
Name plumbing.ReferenceName
Old plumbing.Hash
New plumbing.Hash
}
func (c *Command) Action() Action {
if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash {
return Invalid
}
if c.Old == plumbing.ZeroHash {
return Create
}
if c.New == plumbing.ZeroHash {
return Delete
}
return Update
}
func (c *Command) validate() error {
if c.Action() == Invalid {
return ErrMalformedCommand
}
return nil
}
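
A brief sketch of building a push request with the types above; the branch name and hash are illustrative. A zero Old hash marks the command as a reference creation.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	req := packp.NewReferenceUpdateRequest()
	cmd := &packp.Command{
		Name: plumbing.ReferenceName("refs/heads/feature"),
		Old:  plumbing.ZeroHash, // zero old hash: this is a creation
		New:  plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	}
	req.Commands = append(req.Commands, cmd)

	fmt.Println(cmd.Action()) // create
}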

View File

@ -0,0 +1,250 @@
package packp
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
var (
shallowLineLength = len(shallow) + hashSize
minCommandLength = hashSize*2 + 2 + 1
minCommandAndCapsLength = minCommandLength + 1
)
var (
ErrEmpty = errors.New("empty update-request message")
errNoCommands = errors.New("unexpected EOF before any command")
errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found")
)
func errMalformedRequest(reason string) error {
return fmt.Errorf("malformed request: %s", reason)
}
func errInvalidHashSize(got int) error {
return fmt.Errorf("invalid hash size: expected %d, got %d",
hashSize, got)
}
func errInvalidHash(err error) error {
return fmt.Errorf("invalid hash: %s", err.Error())
}
func errInvalidShallowLineLength(got int) error {
return errMalformedRequest(fmt.Sprintf(
"invalid shallow line length: expected %d, got %d",
shallowLineLength, got))
}
func errInvalidCommandCapabilitiesLineLength(got int) error {
return errMalformedRequest(fmt.Sprintf(
"invalid command and capabilities line length: expected at least %d, got %d",
minCommandAndCapsLength, got))
}
func errInvalidCommandLineLength(got int) error {
return errMalformedRequest(fmt.Sprintf(
"invalid command line length: expected at least %d, got %d",
minCommandLength, got))
}
func errInvalidShallowObjId(err error) error {
return errMalformedRequest(
fmt.Sprintf("invalid shallow object id: %s", err.Error()))
}
func errInvalidOldObjId(err error) error {
return errMalformedRequest(
fmt.Sprintf("invalid old object id: %s", err.Error()))
}
func errInvalidNewObjId(err error) error {
return errMalformedRequest(
fmt.Sprintf("invalid new object id: %s", err.Error()))
}
func errMalformedCommand(err error) error {
return errMalformedRequest(fmt.Sprintf(
"malformed command: %s", err.Error()))
}
// Decode reads the next update-request message from the reader and stores it in req.
func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
var rc io.ReadCloser
var ok bool
rc, ok = r.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(r)
}
d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}
return d.Decode(req)
}
type updReqDecoder struct {
r io.ReadCloser
s *pktline.Scanner
req *ReferenceUpdateRequest
}
func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error {
d.req = req
funcs := []func() error{
d.scanLine,
d.decodeShallow,
d.decodeCommandAndCapabilities,
d.decodeCommands,
d.setPackfile,
req.validate,
}
for _, f := range funcs {
if err := f(); err != nil {
return err
}
}
return nil
}
func (d *updReqDecoder) scanLine() error {
if ok := d.s.Scan(); !ok {
return d.scanErrorOr(ErrEmpty)
}
return nil
}
func (d *updReqDecoder) decodeShallow() error {
b := d.s.Bytes()
if !bytes.HasPrefix(b, shallowNoSp) {
return nil
}
if len(b) != shallowLineLength {
return errInvalidShallowLineLength(len(b))
}
h, err := parseHash(string(b[len(shallow):]))
if err != nil {
return errInvalidShallowObjId(err)
}
if ok := d.s.Scan(); !ok {
return d.scanErrorOr(errNoCommands)
}
d.req.Shallow = &h
return nil
}
func (d *updReqDecoder) decodeCommands() error {
for {
b := d.s.Bytes()
if bytes.Equal(b, pktline.Flush) {
return nil
}
c, err := parseCommand(b)
if err != nil {
return err
}
d.req.Commands = append(d.req.Commands, c)
if ok := d.s.Scan(); !ok {
return d.s.Err()
}
}
}
func (d *updReqDecoder) decodeCommandAndCapabilities() error {
b := d.s.Bytes()
i := bytes.IndexByte(b, 0)
if i == -1 {
return errMissingCapabilitiesDelimiter
}
if len(b) < minCommandAndCapsLength {
return errInvalidCommandCapabilitiesLineLength(len(b))
}
cmd, err := parseCommand(b[:i])
if err != nil {
return err
}
d.req.Commands = append(d.req.Commands, cmd)
if err := d.req.Capabilities.Decode(b[i+1:]); err != nil {
return err
}
if err := d.scanLine(); err != nil {
return err
}
return nil
}
func (d *updReqDecoder) setPackfile() error {
d.req.Packfile = d.r
return nil
}
func parseCommand(b []byte) (*Command, error) {
if len(b) < minCommandLength {
return nil, errInvalidCommandLineLength(len(b))
}
var (
os, ns string
n plumbing.ReferenceName
)
if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil {
return nil, errMalformedCommand(err)
}
oh, err := parseHash(os)
if err != nil {
return nil, errInvalidOldObjId(err)
}
nh, err := parseHash(ns)
if err != nil {
return nil, errInvalidNewObjId(err)
}
return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil
}
func parseHash(s string) (plumbing.Hash, error) {
if len(s) != hashSize {
return plumbing.ZeroHash, errInvalidHashSize(len(s))
}
if _, err := hex.DecodeString(s); err != nil {
return plumbing.ZeroHash, errInvalidHash(err)
}
h := plumbing.NewHash(s)
return h, nil
}
func (d *updReqDecoder) scanErrorOr(origErr error) error {
if err := d.s.Err(); err != nil {
return err
}
return origErr
}
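
One grounded corner case of the decoder above, as a sketch: decoding an empty stream yields ErrEmpty rather than a scan error.

package main

import (
	"fmt"
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	req := packp.NewReferenceUpdateRequest()
	err := req.Decode(strings.NewReader(""))
	fmt.Println(err == packp.ErrEmpty) // true
}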

View File

@ -0,0 +1,75 @@
package packp
import (
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (r *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := r.validate(); err != nil {
return err
}
e := pktline.NewEncoder(w)
if err := r.encodeShallow(e, r.Shallow); err != nil {
return err
}
if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil {
return err
}
if r.Packfile != nil {
if _, err := io.Copy(w, r.Packfile); err != nil {
return err
}
return r.Packfile.Close()
}
return nil
}
func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
h *plumbing.Hash) error {
if h == nil {
return nil
}
objId := []byte(h.String())
return e.Encodef("%s%s", shallow, objId)
}
func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
cmds []*Command, cap *capability.List) error {
if err := e.Encodef("%s\x00%s",
formatCommand(cmds[0]), cap.String()); err != nil {
return err
}
for _, cmd := range cmds[1:] {
if err := e.Encodef(formatCommand(cmd)); err != nil {
return err
}
}
return e.Flush()
}
func formatCommand(cmd *Command) string {
o := cmd.Old.String()
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
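
A hedged round-trip sketch tying this encoder to the decoder from the previous file (the hash is illustrative): encode a one-command request, then decode it back.

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	src := packp.NewReferenceUpdateRequest()
	src.Commands = []*packp.Command{{
		Name: plumbing.ReferenceName("refs/heads/master"),
		Old:  plumbing.ZeroHash,
		New:  plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	}}

	var buf bytes.Buffer
	if err := src.Encode(&buf); err != nil {
		panic(err)
	}

	dst := packp.NewReferenceUpdateRequest()
	if err := dst.Decode(&buf); err != nil {
		panic(err)
	}
	fmt.Println(dst.Commands[0].Name, dst.Commands[0].Action()) // refs/heads/master create
}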

View File

@ -0,0 +1,98 @@
package packp
import (
"bytes"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// UploadPackRequest represents an upload-pack request.
// Zero-value is not safe, use NewUploadPackRequest instead.
type UploadPackRequest struct {
UploadRequest
UploadHaves
}
// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer.
func NewUploadPackRequest() *UploadPackRequest {
ur := NewUploadRequest()
return &UploadPackRequest{
UploadHaves: UploadHaves{},
UploadRequest: *ur,
}
}
// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
// returns a pointer. The request capabilities are filled with the most optimal
// ones, based on the adv value (advertised capabilities). The returned
// UploadPackRequest has no wants, haves or shallows and an infinite depth.
func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
ur := NewUploadRequestFromCapabilities(adv)
return &UploadPackRequest{
UploadHaves: UploadHaves{},
UploadRequest: *ur,
}
}
// IsEmpty returns true if the request is empty: a request is empty when all
// of its Wants are already contained in its Haves, or when it has no Wants.
func (r *UploadPackRequest) IsEmpty() bool {
return isSubset(r.Wants, r.Haves)
}
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {
for _, h := range needle {
found := false
for _, oh := range haystack {
if h == oh {
found = true
break
}
}
if !found {
return false
}
}
return true
}
// UploadHaves is a message to signal the references that a client has in an
// upload-pack. Do not use this directly; use UploadPackRequest instead.
type UploadHaves struct {
Haves []plumbing.Hash
}
// Encode encodes the UploadHaves into the Writer. If flush is true, a flush
// command will be encoded at the end of the writer content.
func (u *UploadHaves) Encode(w io.Writer, flush bool) error {
e := pktline.NewEncoder(w)
plumbing.HashesSort(u.Haves)
var last plumbing.Hash
for _, have := range u.Haves {
if bytes.Equal(last[:], have[:]) {
continue
}
if err := e.Encodef("have %s\n", have); err != nil {
return fmt.Errorf("sending haves for %q: %s", have, err)
}
last = have
}
if flush && len(u.Haves) != 0 {
if err := e.Flush(); err != nil {
return fmt.Errorf("sending flush-pkt after haves: %s", err)
}
}
return nil
}
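
A sketch of the IsEmpty semantics above (the hash is illustrative): a request is only non-empty while it wants something it does not already have.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	req := packp.NewUploadPackRequest()
	req.Wants = append(req.Wants, h)
	fmt.Println(req.IsEmpty()) // false: we want something we don't have

	req.Haves = append(req.Haves, h)
	fmt.Println(req.IsEmpty()) // true: every want is already a have
}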

View File

@ -0,0 +1,109 @@
package packp
import (
"errors"
"io"
"bufio"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// ErrUploadPackResponseNotDecoded is returned if Read is called without
// decoding first
var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded")
// UploadPackResponse contains all the information returned by the upload-pack
// service. The response implements io.ReadCloser, allowing the packfile to be
// read directly from it.
type UploadPackResponse struct {
ShallowUpdate
ServerResponse
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse creates a new UploadPackResponse instance; the request
// being answered by the response is required.
func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse {
isShallow := !req.Depth.IsZero()
isMultiACK := req.Capabilities.Supports(capability.MultiACK) ||
req.Capabilities.Supports(capability.MultiACKDetailed)
return &UploadPackResponse{
isShallow: isShallow,
isMultiACK: isMultiACK,
}
}
// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance,
// and sets its packfile reader.
func NewUploadPackResponseWithPackfile(req *UploadPackRequest,
pf io.ReadCloser) *UploadPackResponse {
r := NewUploadPackResponse(req)
r.r = pf
return r
}
// Decode decodes all the responses sent by upload-pack service into the struct
// and prepares it to read the packfile using the Read method
func (r *UploadPackResponse) Decode(reader io.ReadCloser) error {
buf := bufio.NewReader(reader)
if r.isShallow {
if err := r.ShallowUpdate.Decode(buf); err != nil {
return err
}
}
if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil {
return err
}
// now the reader is ready to read the packfile content
r.r = ioutil.NewReadCloser(buf, reader)
return nil
}
// Encode encodes an UploadPackResponse.
func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
if r.isShallow {
if err := r.ShallowUpdate.Encode(w); err != nil {
return err
}
}
if err := r.ServerResponse.Encode(w); err != nil {
return err
}
defer ioutil.CheckClose(r.r, &err)
_, err = io.Copy(w, r.r)
return err
}
// Read reads the packfile data. If the request was done with any Sideband
// capability, the content read should be demultiplexed. If Decode wasn't
// called beforehand, ErrUploadPackResponseNotDecoded is returned.
func (r *UploadPackResponse) Read(p []byte) (int, error) {
if r.r == nil {
return 0, ErrUploadPackResponseNotDecoded
}
return r.r.Read(p)
}
// Close closes the underlying reader, if any.
func (r *UploadPackResponse) Close() error {
if r.r == nil {
return nil
}
return r.r.Close()
}
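
A small grounded sketch of the guard above: calling Read before Decode (or without a packfile set) reports ErrUploadPackResponseNotDecoded.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
)

func main() {
	req := packp.NewUploadPackRequest()
	resp := packp.NewUploadPackResponse(req)

	_, err := resp.Read(make([]byte, 32))
	fmt.Println(err == packp.ErrUploadPackResponseNotDecoded) // true
}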

209
vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go generated vendored Normal file
View File

@ -0,0 +1,209 @@
package plumbing
import (
"errors"
"fmt"
"strings"
)
const (
refPrefix = "refs/"
refHeadPrefix = refPrefix + "heads/"
refTagPrefix = refPrefix + "tags/"
refRemotePrefix = refPrefix + "remotes/"
refNotePrefix = refPrefix + "notes/"
symrefPrefix = "ref: "
)
// RefRevParseRules are a set of rules to parse references into short names.
// These are the same rules as used by git in shorten_unambiguous_ref.
// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
var RefRevParseRules = []string{
"refs/%s",
"refs/tags/%s",
"refs/heads/%s",
"refs/remotes/%s",
"refs/remotes/%s/HEAD",
}
var (
ErrReferenceNotFound = errors.New("reference not found")
)
// ReferenceType describes the type of a reference.
type ReferenceType int8
const (
InvalidReference ReferenceType = 0
HashReference ReferenceType = 1
SymbolicReference ReferenceType = 2
)
func (r ReferenceType) String() string {
switch r {
case InvalidReference:
return "invalid-reference"
case HashReference:
return "hash-reference"
case SymbolicReference:
return "symbolic-reference"
}
return ""
}
// ReferenceName is the name of a reference (e.g. refs/heads/master).
type ReferenceName string
// NewBranchReferenceName returns a reference name describing a branch based on
// its short name.
func NewBranchReferenceName(name string) ReferenceName {
return ReferenceName(refHeadPrefix + name)
}
// NewNoteReferenceName returns a reference name describing a note based on its
// short name.
func NewNoteReferenceName(name string) ReferenceName {
return ReferenceName(refNotePrefix + name)
}
// NewRemoteReferenceName returns a reference name describing a remote branch
// based on its short name and the remote name.
func NewRemoteReferenceName(remote, name string) ReferenceName {
return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
}
// NewRemoteHEADReferenceName returns a reference name describing the HEAD
// branch of a remote.
func NewRemoteHEADReferenceName(remote string) ReferenceName {
return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
}
// NewTagReferenceName returns a reference name describing a tag based on its
// short name.
func NewTagReferenceName(name string) ReferenceName {
return ReferenceName(refTagPrefix + name)
}
// IsBranch checks whether a reference is a branch
func (r ReferenceName) IsBranch() bool {
return strings.HasPrefix(string(r), refHeadPrefix)
}
// IsNote checks whether a reference is a note
func (r ReferenceName) IsNote() bool {
return strings.HasPrefix(string(r), refNotePrefix)
}
// IsRemote checks whether a reference is a remote
func (r ReferenceName) IsRemote() bool {
return strings.HasPrefix(string(r), refRemotePrefix)
}
// IsTag checks whether a reference is a tag
func (r ReferenceName) IsTag() bool {
return strings.HasPrefix(string(r), refTagPrefix)
}
func (r ReferenceName) String() string {
return string(r)
}
// Short returns the short name of a ReferenceName
func (r ReferenceName) Short() string {
s := string(r)
res := s
for _, format := range RefRevParseRules {
_, err := fmt.Sscanf(s, format, &res)
if err == nil {
continue
}
}
return res
}
const (
HEAD ReferenceName = "HEAD"
Master ReferenceName = "refs/heads/master"
)
// Reference is a representation of git reference
type Reference struct {
t ReferenceType
n ReferenceName
h Hash
target ReferenceName
}
// NewReferenceFromStrings creates a reference from name and target as strings;
// the resulting reference can be a SymbolicReference or a HashReference based
// on the target provided.
func NewReferenceFromStrings(name, target string) *Reference {
n := ReferenceName(name)
if strings.HasPrefix(target, symrefPrefix) {
target := ReferenceName(target[len(symrefPrefix):])
return NewSymbolicReference(n, target)
}
return NewHashReference(n, NewHash(target))
}
// NewSymbolicReference creates a new SymbolicReference reference
func NewSymbolicReference(n, target ReferenceName) *Reference {
return &Reference{
t: SymbolicReference,
n: n,
target: target,
}
}
// NewHashReference creates a new HashReference reference
func NewHashReference(n ReferenceName, h Hash) *Reference {
return &Reference{
t: HashReference,
n: n,
h: h,
}
}
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
// Strings dumps a reference as a [2]string
func (r *Reference) Strings() [2]string {
var o [2]string
o[0] = r.Name().String()
switch r.Type() {
case HashReference:
o[1] = r.Hash().String()
case SymbolicReference:
o[1] = symrefPrefix + r.Target().String()
}
return o
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
}
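
A short usage sketch of the reference API above:

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
)

func main() {
	// A symbolic reference, as stored in a .git/HEAD file.
	head := plumbing.NewReferenceFromStrings("HEAD", "ref: refs/heads/master")
	fmt.Println(head.Type(), head.Target()) // symbolic-reference refs/heads/master

	branch := plumbing.NewBranchReferenceName("master")
	fmt.Println(branch.IsBranch(), branch.Short()) // true master
}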

11
vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package plumbing
// Revision represents a git revision. To get more details about git
// revisions, please check the git manual page:
// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
type Revision string
func (r Revision) String() string {
return string(r)
}

View File

@ -0,0 +1,230 @@
// Package revlist provides support to access the ancestors of commits, in a
// similar way as the git-rev-list command.
package revlist
import (
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
"gopkg.in/src-d/go-git.v4/plumbing/object"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)
// Objects computes a complementary set: it returns the hashes of all the
// objects reachable from the given objects. The ignore parameter lists object
// hashes to be excluded from the result. All of the objects must be accessible
// from the object storer.
func Objects(
s storer.EncodedObjectStorer,
objs,
ignore []plumbing.Hash,
) ([]plumbing.Hash, error) {
return ObjectsWithStorageForIgnores(s, s, objs, ignore)
}
// ObjectsWithStorageForIgnores is the same as Objects, but a
// secondary storage layer can be provided, to be used for finding the
// full set of objects to be ignored while finding the reachable
// objects. This is useful when the main `s` storage layer is slow
// and/or remote, while the ignore list is available somewhere local.
func ObjectsWithStorageForIgnores(
s, ignoreStore storer.EncodedObjectStorer,
objs,
ignore []plumbing.Hash,
) ([]plumbing.Hash, error) {
ignore, err := objects(ignoreStore, ignore, nil, true)
if err != nil {
return nil, err
}
return objects(s, objs, ignore, false)
}
func objects(
s storer.EncodedObjectStorer,
objects,
ignore []plumbing.Hash,
allowMissingObjects bool,
) ([]plumbing.Hash, error) {
seen := hashListToSet(ignore)
result := make(map[plumbing.Hash]bool)
visited := make(map[plumbing.Hash]bool)
walkerFunc := func(h plumbing.Hash) {
if !seen[h] {
result[h] = true
seen[h] = true
}
}
for _, h := range objects {
if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil {
if allowMissingObjects && err == plumbing.ErrObjectNotFound {
continue
}
return nil, err
}
}
return hashSetToList(result), nil
}
// processObject obtains the object using the hash and processes it depending on its type
func processObject(
s storer.EncodedObjectStorer,
h plumbing.Hash,
seen map[plumbing.Hash]bool,
visited map[plumbing.Hash]bool,
ignore []plumbing.Hash,
walkerFunc func(h plumbing.Hash),
) error {
if seen[h] {
return nil
}
o, err := s.EncodedObject(plumbing.AnyObject, h)
if err != nil {
return err
}
do, err := object.DecodeObject(s, o)
if err != nil {
return err
}
switch do := do.(type) {
case *object.Commit:
return reachableObjects(do, seen, visited, ignore, walkerFunc)
case *object.Tree:
return iterateCommitTrees(seen, do, walkerFunc)
case *object.Tag:
walkerFunc(do.Hash)
return processObject(s, do.Target, seen, visited, ignore, walkerFunc)
case *object.Blob:
walkerFunc(do.Hash)
default:
return fmt.Errorf("object type not valid: %s. "+
"Object reference: %s", o.Type(), o.Hash())
}
return nil
}
// reachableObjects returns, using the callback function, all the objects
// reachable from the specified commit. To avoid iterating over already seen
// commits, if a commit hash is in the 'seen' set, its trees and blob objects
// are not iterated.
func reachableObjects(
commit *object.Commit,
seen map[plumbing.Hash]bool,
visited map[plumbing.Hash]bool,
ignore []plumbing.Hash,
cb func(h plumbing.Hash),
) error {
i := object.NewCommitPreorderIter(commit, seen, ignore)
pending := make(map[plumbing.Hash]bool)
addPendingParents(pending, visited, commit)
for {
commit, err := i.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if pending[commit.Hash] {
delete(pending, commit.Hash)
}
addPendingParents(pending, visited, commit)
if visited[commit.Hash] && len(pending) == 0 {
break
}
if seen[commit.Hash] {
continue
}
cb(commit.Hash)
tree, err := commit.Tree()
if err != nil {
return err
}
if err := iterateCommitTrees(seen, tree, cb); err != nil {
return err
}
}
return nil
}
func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) {
for _, p := range commit.ParentHashes {
if !visited[p] {
pending[p] = true
}
}
}
// iterateCommitTrees iterates all the trees reachable from the given tree
func iterateCommitTrees(
seen map[plumbing.Hash]bool,
tree *object.Tree,
cb func(h plumbing.Hash),
) error {
if seen[tree.Hash] {
return nil
}
cb(tree.Hash)
treeWalker := object.NewTreeWalker(tree, true, seen)
for {
_, e, err := treeWalker.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if e.Mode == filemode.Submodule {
continue
}
if seen[e.Hash] {
continue
}
cb(e.Hash)
}
return nil
}
func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash {
var result []plumbing.Hash
for key := range hashes {
result = append(result, key)
}
return result
}
func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool {
result := make(map[plumbing.Hash]bool)
for _, h := range hashes {
result[h] = true
}
return result
}
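
A hedged sketch of the typical use of Objects when negotiating a fetch or push; st, localHead and remoteHead are assumed names for a populated storer and two commit hashes it already contains.

package example

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/revlist"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// objectsToSend returns everything reachable from localHead that is not
// already reachable from remoteHead.
func objectsToSend(st storer.EncodedObjectStorer, localHead, remoteHead plumbing.Hash) ([]plumbing.Hash, error) {
	return revlist.Objects(st, []plumbing.Hash{localHead}, []plumbing.Hash{remoteHead})
}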

View File

@ -0,0 +1,2 @@
// Package storer defines the interfaces to store objects, references, etc.
package storer

View File

@ -0,0 +1,9 @@
package storer
import "gopkg.in/src-d/go-git.v4/plumbing/format/index"
// IndexStorer generic storage of index.Index
type IndexStorer interface {
SetIndex(*index.Index) error
Index() (*index.Index, error)
}

View File

@ -0,0 +1,288 @@
package storer
import (
"errors"
"io"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
)
var (
// ErrStop is used to stop a ForEach function in an Iter
ErrStop = errors.New("stop iter")
)
// EncodedObjectStorer is a generic storage of objects
type EncodedObjectStorer interface {
// NewEncodedObject returns a new plumbing.EncodedObject, the real type
// of the object can be a custom implementation or the default one,
// plumbing.MemoryObject.
NewEncodedObject() plumbing.EncodedObject
// SetEncodedObject saves an object into the storage. The object should
// be created with the NewEncodedObject method, and it should fail if the
// type is not supported.
SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
// EncodedObject gets an object by hash with the given
// plumbing.ObjectType. Implementors should return
// (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with
// both the given hash and object type.
//
// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
// TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
// be looked up regardless of its type.
EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
// IterEncodedObjects returns a custom EncodedObjectIter over all the
// objects in the storage.
//
// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
// TreeObject and AnyObject.
IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
// HasEncodedObject returns plumbing.ErrObjectNotFound if the object
// doesn't exist. If the object does exist, it returns nil.
HasEncodedObject(plumbing.Hash) error
// EncodedObjectSize returns the plaintext size of the encoded object.
EncodedObjectSize(plumbing.Hash) (int64, error)
}
// DeltaObjectStorer is an EncodedObjectStorer that can return delta
// objects.
type DeltaObjectStorer interface {
// DeltaObject is the same as EncodedObject but without resolving deltas.
// Deltas will be returned as plumbing.DeltaObject instances.
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is an optional interface for ObjectStorer; it enables
// transaction-based write and read operations in the storage.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
}
// LooseObjectStorer is an optional interface for managing "loose"
// objects, i.e. those not in packfiles.
type LooseObjectStorer interface {
// ForEachObjectHash iterates over all the (loose) object hashes
// in the repository without necessarily having to read those objects.
// Objects only inside pack files may be omitted.
// If ErrStop is sent the iteration is stopped but no error is returned.
ForEachObjectHash(func(plumbing.Hash) error) error
// LooseObjectTime looks up the (m)time associated with the
// loose object (that is not in a pack file). Some
// implementations (e.g. without loose objects)
// always return an error.
LooseObjectTime(plumbing.Hash) (time.Time, error)
// DeleteLooseObject deletes a loose object if it exists.
DeleteLooseObject(plumbing.Hash) error
}
// PackedObjectStorer is an optional interface for managing objects in
// packfiles.
type PackedObjectStorer interface {
// ObjectPacks returns hashes of object packs if the underlying
// implementation has pack files.
ObjectPacks() ([]plumbing.Hash, error)
// DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist.
// Deletion is only performed if the pack is older than the supplied time (or the time is zero).
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is an optional interface for ObjectStorer; it enables direct
// writing of a packfile to the storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//
// If the Storer does not implement PackfileWriter, the objects should be
// written using the SetEncodedObject method.
PackfileWriter() (io.WriteCloser, error)
}
// EncodedObjectIter is a generic closable interface for iterating over objects.
type EncodedObjectIter interface {
Next() (plumbing.EncodedObject, error)
ForEach(func(plumbing.EncodedObject) error) error
Close()
}
// Transaction is an in-progress storage transaction. A transaction must end
// with a call to Commit or Rollback.
type Transaction interface {
SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
Commit() error
Rollback() error
}
// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a
// series of object hashes and yields their associated objects by retrieving
// each one from object storage. The retrievals are lazy and only occur when the
// iterator moves forward with a call to Next().
//
// The EncodedObjectLookupIter must be closed with a call to Close() when it is
// no longer needed.
type EncodedObjectLookupIter struct {
storage EncodedObjectStorer
series []plumbing.Hash
t plumbing.ObjectType
pos int
}
// NewEncodedObjectLookupIter returns an object iterator given an object storage
// and a slice of object hashes.
func NewEncodedObjectLookupIter(
storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter {
return &EncodedObjectLookupIter{
storage: storage,
series: series,
t: t,
}
}
// Next returns the next object from the iterator. If the iterator has reached
// the end it will return io.EOF as an error. If the object can't be found in
// the object storage, it will return plumbing.ErrObjectNotFound as an error.
// If the object is retrieved successfully, the error will be nil.
func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
if iter.pos >= len(iter.series) {
return nil, io.EOF
}
hash := iter.series[iter.pos]
obj, err := iter.storage.EncodedObject(iter.t, hash)
if err == nil {
iter.pos++
}
return obj, err
}
// ForEach calls the cb function for each object contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error {
return ForEachIterator(iter, cb)
}
// Close releases any resources used by the iterator.
func (iter *EncodedObjectLookupIter) Close() {
iter.pos = len(iter.series)
}
// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a
// series of objects stored in a slice and yields each one in turn when Next()
// is called.
//
// The EncodedObjectSliceIter must be closed with a call to Close() when it is
// no longer needed.
type EncodedObjectSliceIter struct {
series []plumbing.EncodedObject
}
// NewEncodedObjectSliceIter returns an object iterator for the given slice of
// objects.
func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter {
return &EncodedObjectSliceIter{
series: series,
}
}
// Next returns the next object from the iterator. If the iterator has reached
// the end it will return io.EOF as an error. If the object is retrieved
// successfully, the error will be nil.
func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
if len(iter.series) == 0 {
return nil, io.EOF
}
obj := iter.series[0]
iter.series = iter.series[1:]
return obj, nil
}
// ForEach calls the cb function for each object contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error {
return ForEachIterator(iter, cb)
}
// Close releases any resources used by the iterator.
func (iter *EncodedObjectSliceIter) Close() {
iter.series = []plumbing.EncodedObject{}
}
// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over
// several EncodedObjectIters.
//
// The MultiEncodedObjectIter must be closed with a call to Close() when it is
// no longer needed.
type MultiEncodedObjectIter struct {
iters []EncodedObjectIter
}
// NewMultiEncodedObjectIter returns an object iterator for the given slice of
// EncodedObjectIters.
func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
return &MultiEncodedObjectIter{iters: iters}
}
// Next returns the next object from the iterator; if one iterator reaches
// io.EOF, it is removed and the next one is used.
func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) {
if len(iter.iters) == 0 {
return nil, io.EOF
}
obj, err := iter.iters[0].Next()
if err == io.EOF {
iter.iters[0].Close()
iter.iters = iter.iters[1:]
return iter.Next()
}
return obj, err
}
// ForEach calls the cb function for each object contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error {
return ForEachIterator(iter, cb)
}
// Close releases any resources used by the iterator.
func (iter *MultiEncodedObjectIter) Close() {
for _, i := range iter.iters {
i.Close()
}
}
type bareIterator interface {
Next() (plumbing.EncodedObject, error)
Close()
}
// ForEachIterator is a helper function to build iterators without the need to
// rewrite the same ForEach function each time.
func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error {
defer iter.Close()
for {
obj, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := cb(obj); err != nil {
if err == ErrStop {
return nil
}
return err
}
}
}
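
A grounded sketch of the ErrStop contract with one of the iterators above: returning ErrStop from the callback halts iteration without surfacing an error (plumbing.MemoryObject is the default EncodedObject implementation mentioned earlier).

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

func main() {
	objs := []plumbing.EncodedObject{
		&plumbing.MemoryObject{}, &plumbing.MemoryObject{}, &plumbing.MemoryObject{},
	}

	count := 0
	iter := storer.NewEncodedObjectSliceIter(objs)
	err := iter.ForEach(func(plumbing.EncodedObject) error {
		count++
		if count == 2 {
			return storer.ErrStop // halt early, ForEach returns nil
		}
		return nil
	})
	fmt.Println(count, err) // 2 <nil>
}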

View File

@ -0,0 +1,240 @@
package storer
import (
"errors"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
)
const MaxResolveRecursion = 1024
// ErrMaxResolveRecursion is returned by ResolveReference if MaxResolveRecursion
// is exceeded.
var ErrMaxResolveRecursion = errors.New("max. recursion level reached")
// ReferenceStorer is a generic storage of references.
type ReferenceStorer interface {
SetReference(*plumbing.Reference) error
// CheckAndSetReference sets the reference `new`, but if `old` is
// not `nil`, it first checks that the current stored value for
// `old.Name()` matches the given reference value in `old`. If
// not, it returns an error and doesn't update `new`.
CheckAndSetReference(new, old *plumbing.Reference) error
Reference(plumbing.ReferenceName) (*plumbing.Reference, error)
IterReferences() (ReferenceIter, error)
RemoveReference(plumbing.ReferenceName) error
CountLooseRefs() (int, error)
PackRefs() error
}
// ReferenceIter is a generic closable interface for iterating over references.
type ReferenceIter interface {
Next() (*plumbing.Reference, error)
ForEach(func(*plumbing.Reference) error) error
Close()
}
type referenceFilteredIter struct {
ff func(r *plumbing.Reference) bool
iter ReferenceIter
}
// NewReferenceFilteredIter returns a reference iterator for the given
// reference iterator. This iterator will yield only the references that
// satisfy the provided filter function.
func NewReferenceFilteredIter(
ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter {
return &referenceFilteredIter{ff, iter}
}
// Next returns the next reference from the iterator. If the iterator has reached
// the end it will return io.EOF as an error.
func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) {
for {
r, err := iter.iter.Next()
if err != nil {
return nil, err
}
if iter.ff(r) {
return r, nil
}
continue
}
}
// ForEach calls the cb function for each reference contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error {
defer iter.Close()
for {
r, err := iter.Next()
if err == io.EOF {
break
}
if err != nil {
return err
}
if err := cb(r); err != nil {
if err == ErrStop {
break
}
return err
}
}
return nil
}
// Close releases any resources used by the iterator.
func (iter *referenceFilteredIter) Close() {
iter.iter.Close()
}
// ReferenceSliceIter implements ReferenceIter. It iterates over a series of
// references stored in a slice and yields each one in turn when Next() is
// called.
//
// The ReferenceSliceIter must be closed with a call to Close() when it is no
// longer needed.
type ReferenceSliceIter struct {
series []*plumbing.Reference
pos int
}
// NewReferenceSliceIter returns a reference iterator for the given slice of
// objects.
func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter {
return &ReferenceSliceIter{
series: series,
}
}
// Next returns the next reference from the iterator. If the iterator has
// reached the end it will return io.EOF as an error.
func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
if iter.pos >= len(iter.series) {
return nil, io.EOF
}
obj := iter.series[iter.pos]
iter.pos++
return obj, nil
}
// ForEach calls the cb function for each reference contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
return forEachReferenceIter(iter, cb)
}
type bareReferenceIterator interface {
Next() (*plumbing.Reference, error)
Close()
}
func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
defer iter.Close()
for {
obj, err := iter.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if err := cb(obj); err != nil {
if err == ErrStop {
return nil
}
return err
}
}
}
// Close releases any resources used by the iterator.
func (iter *ReferenceSliceIter) Close() {
iter.pos = len(iter.series)
}
// MultiReferenceIter implements ReferenceIter. It iterates over several
// ReferenceIters.
//
// The MultiReferenceIter must be closed with a call to Close() when it is no
// longer needed.
type MultiReferenceIter struct {
iters []ReferenceIter
}
// NewMultiReferenceIter returns a reference iterator for the given slice of
// ReferenceIters.
func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
return &MultiReferenceIter{iters: iters}
}
// Next returns the next reference from the iterator; if one iterator reaches
// io.EOF, it is removed and the next one is used.
func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
if len(iter.iters) == 0 {
return nil, io.EOF
}
obj, err := iter.iters[0].Next()
if err == io.EOF {
iter.iters[0].Close()
iter.iters = iter.iters[1:]
return iter.Next()
}
return obj, err
}
// ForEach calls the cb function for each reference contained in this iter until
// an error happens or the end of the iter is reached. If ErrStop is sent,
// the iteration is stopped but no error is returned. The iterator is closed.
func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
return forEachReferenceIter(iter, cb)
}
// Close releases any resources used by the iterator.
func (iter *MultiReferenceIter) Close() {
for _, i := range iter.iters {
i.Close()
}
}
// ResolveReference resolves a SymbolicReference to a HashReference.
func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
r, err := s.Reference(n)
if err != nil || r == nil {
return r, err
}
return resolveReference(s, r, 0)
}
func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) {
if r.Type() != plumbing.SymbolicReference {
return r, nil
}
if recursion > MaxResolveRecursion {
return nil, ErrMaxResolveRecursion
}
t, err := s.Reference(r.Target())
if err != nil {
return nil, err
}
recursion++
return resolveReference(s, t, recursion)
}
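
A hedged sketch of ResolveReference: given any ReferenceStorer implementation (s is an assumed name), HEAD is followed through symbolic references down to a hash.

package example

import (
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

// headHash resolves HEAD to the hash it ultimately points at.
func headHash(s storer.ReferenceStorer) (plumbing.Hash, error) {
	ref, err := storer.ResolveReference(s, plumbing.HEAD)
	if err != nil {
		return plumbing.ZeroHash, err
	}
	return ref.Hash(), nil
}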

View File

@ -0,0 +1,10 @@
package storer
import "gopkg.in/src-d/go-git.v4/plumbing"
// ShallowStorer is a storage of references to shallow commits by hash,
// meaning that these commits have missing parents because of a shallow fetch.
type ShallowStorer interface {
SetShallow([]plumbing.Hash) error
Shallow() ([]plumbing.Hash, error)
}

View File

@ -0,0 +1,15 @@
package storer
// Storer is a basic storer for encoded objects and references.
type Storer interface {
EncodedObjectStorer
ReferenceStorer
}
// Initializer should be implemented by storers that need to perform any
// operation when creating a new repository (e.g. git init).
type Initializer interface {
// Init performs initialization of the storer and returns the error, if
// any.
Init() error
}

View File

@ -0,0 +1,48 @@
// Package client contains helper functions to deal with the different client
// protocols.
package client
import (
"fmt"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/file"
"gopkg.in/src-d/go-git.v4/plumbing/transport/git"
"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
"gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
)
// Protocols are the protocols supported by default.
var Protocols = map[string]transport.Transport{
"http": http.DefaultClient,
"https": http.DefaultClient,
"ssh": ssh.DefaultClient,
"git": git.DefaultClient,
"file": file.DefaultClient,
}
// InstallProtocol adds or modifies an existing protocol.
func InstallProtocol(scheme string, c transport.Transport) {
if c == nil {
delete(Protocols, scheme)
return
}
Protocols[scheme] = c
}
// NewClient returns the appropriate client from the set of known protocols:
// http://, https://, ssh://, git:// and file://.
// See `InstallProtocol` to add or modify protocols.
func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
f, ok := Protocols[endpoint.Protocol]
if !ok {
return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)
}
if f == nil {
return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol)
}
return f, nil
}
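
A sketch of picking a transport for a URL (the repository URL is illustrative):

package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/transport"
	"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
)

func main() {
	ep, err := transport.NewEndpoint("https://github.com/src-d/go-git.git")
	if err != nil {
		panic(err)
	}
	tr, err := client.NewClient(ep) // picks the HTTP transport from Protocols
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", tr)
}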

View File

@ -0,0 +1,274 @@
// Package transport includes the implementation for different transport
// protocols.
//
// `Client` can be used to fetch and send packfiles to a git server.
// The `client` package provides higher level functions to instantiate the
// appropriate `Client` based on the repository URL.
//
// go-git supports HTTP and SSH (see `Protocols`), but you can also install
// your own protocols (see the `client` package).
//
// Each protocol has its own implementation of `Client`, but you should
// generally not use them directly, use `client.NewClient` instead.
package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"strconv"
"strings"
giturl "gopkg.in/src-d/go-git.v4/internal/url"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
var (
ErrRepositoryNotFound = errors.New("repository not found")
ErrEmptyRemoteRepository = errors.New("remote repository is empty")
ErrAuthenticationRequired = errors.New("authentication required")
ErrAuthorizationFailed = errors.New("authorization failed")
ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given")
ErrInvalidAuthMethod = errors.New("invalid auth method")
ErrAlreadyConnected = errors.New("session already established")
)
const (
UploadPackServiceName = "git-upload-pack"
ReceivePackServiceName = "git-receive-pack"
)
// Transport can initiate git-upload-pack and git-receive-pack processes.
// It is implemented both by the client and the server, making this an RPC.
type Transport interface {
// NewUploadPackSession starts a git-upload-pack session for an endpoint.
NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error)
// NewReceivePackSession starts a git-receive-pack session for an endpoint.
NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error)
}
type Session interface {
// AdvertisedReferences retrieves the advertised references for a
// repository.
// If the repository does not exist, returns ErrRepositoryNotFound.
// If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
AdvertisedReferences() (*packp.AdvRefs, error)
io.Closer
}
type AuthMethod interface {
fmt.Stringer
Name() string
}
// UploadPackSession represents a git-upload-pack session.
// A git-upload-pack session has two steps: reference discovery
// (AdvertisedReferences) and uploading pack (UploadPack).
type UploadPackSession interface {
Session
// UploadPack takes a git-upload-pack request and returns a response,
// including a packfile. Don't be confused by terminology, the client
// side of a git-upload-pack is called git-fetch-pack, although here
// the same interface is used to make it RPC-like.
UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error)
}
// ReceivePackSession represents a git-receive-pack session.
// A git-receive-pack session has two steps: reference discovery
// (AdvertisedReferences) and receiving pack (ReceivePack).
// In that order.
type ReceivePackSession interface {
Session
// ReceivePack sends an update references request and a packfile
// reader and returns a ReportStatus and error. Don't be confused by
// terminology, the client side of a git-receive-pack is called
// git-send-pack, although here the same interface is used to make it
// RPC-like.
ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error)
}
// Endpoint represents a Git URL in any supported protocol.
type Endpoint struct {
// Protocol is the protocol of the endpoint (e.g. git, https, file).
Protocol string
// User is the user.
User string
// Password is the password.
Password string
// Host is the host.
Host string
// Port is the port to connect to; if 0, the default port for the given
// protocol will be used.
Port int
// Path is the repository path.
Path string
}
var defaultPorts = map[string]int{
"http": 80,
"https": 443,
"git": 9418,
"ssh": 22,
}
// String returns a string representation of the Git URL.
func (u *Endpoint) String() string {
var buf bytes.Buffer
if u.Protocol != "" {
buf.WriteString(u.Protocol)
buf.WriteByte(':')
}
if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" {
buf.WriteString("//")
if u.User != "" || u.Password != "" {
buf.WriteString(url.PathEscape(u.User))
if u.Password != "" {
buf.WriteByte(':')
buf.WriteString(url.PathEscape(u.Password))
}
buf.WriteByte('@')
}
if u.Host != "" {
buf.WriteString(u.Host)
if u.Port != 0 {
port, ok := defaultPorts[strings.ToLower(u.Protocol)]
if !ok || port != u.Port {
fmt.Fprintf(&buf, ":%d", u.Port)
}
}
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(u.Path)
return buf.String()
}
func NewEndpoint(endpoint string) (*Endpoint, error) {
if e, ok := parseSCPLike(endpoint); ok {
return e, nil
}
if e, ok := parseFile(endpoint); ok {
return e, nil
}
return parseURL(endpoint)
}
func parseURL(endpoint string) (*Endpoint, error) {
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
if !u.IsAbs() {
return nil, plumbing.NewPermanentError(fmt.Errorf(
"invalid endpoint: %s", endpoint,
))
}
var user, pass string
if u.User != nil {
user = u.User.Username()
pass, _ = u.User.Password()
}
return &Endpoint{
Protocol: u.Scheme,
User: user,
Password: pass,
Host: u.Hostname(),
Port: getPort(u),
Path: getPath(u),
}, nil
}
func getPort(u *url.URL) int {
p := u.Port()
if p == "" {
return 0
}
i, err := strconv.Atoi(p)
if err != nil {
return 0
}
return i
}
func getPath(u *url.URL) string {
var res string = u.Path
if u.RawQuery != "" {
res += "?" + u.RawQuery
}
if u.Fragment != "" {
res += "#" + u.Fragment
}
return res
}
func parseSCPLike(endpoint string) (*Endpoint, bool) {
if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
return nil, false
}
user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
port, err := strconv.Atoi(portStr)
if err != nil {
port = 22
}
return &Endpoint{
Protocol: "ssh",
User: user,
Host: host,
Port: port,
Path: path,
}, true
}
func parseFile(endpoint string) (*Endpoint, bool) {
if giturl.MatchesScheme(endpoint) {
return nil, false
}
path := endpoint
return &Endpoint{
Protocol: "file",
Path: path,
}, true
}
// UnsupportedCapabilities are the capabilities not supported by any client
// implementation
var UnsupportedCapabilities = []capability.Capability{
capability.MultiACK,
capability.MultiACKDetailed,
capability.ThinPack,
}
// FilterUnsupportedCapabilities filters out all the UnsupportedCapabilities
// from a capability.List. The intended usage is on the client side, to
// filter the capabilities of an AdvRefs message.
func FilterUnsupportedCapabilities(list *capability.List) {
for _, c := range UnsupportedCapabilities {
list.Delete(c)
}
}
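// An illustrative sketch (not part of this commit) of the three parse paths
// of NewEndpoint; the repository URLs are hypothetical:
//
//	ep, _ := NewEndpoint("https://example.com/project.git")
//	// parseURL:     Protocol "https", Host "example.com", Path "/project.git"
//	ep, _ = NewEndpoint("git@example.com:project.git")
//	// parseSCPLike: Protocol "ssh", User "git", Port 22, Path "project.git"
//	ep, _ = NewEndpoint("/srv/git/project.git")
//	// parseFile:    Protocol "file", Path "/srv/git/project.git"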

View File

@ -0,0 +1,156 @@
// Package file implements the file transport protocol.
package file
import (
"bufio"
"errors"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
)
// DefaultClient is the default local client.
var DefaultClient = NewClient(
transport.UploadPackServiceName,
transport.ReceivePackServiceName,
)
type runner struct {
UploadPackBin string
ReceivePackBin string
}
// NewClient returns a new local client using the given git-upload-pack and
// git-receive-pack binaries.
func NewClient(uploadPackBin, receivePackBin string) transport.Transport {
return common.NewClient(&runner{
UploadPackBin: uploadPackBin,
ReceivePackBin: receivePackBin,
})
}
// prefixExecPath resolves cmd inside git's exec-path, as reported by
// `git --exec-path`, and verifies that the resulting binary exists.
func prefixExecPath(cmd string) (string, error) {
execCmd := exec.Command("git", "--exec-path")
stdout, err := execCmd.StdoutPipe()
if err != nil {
return "", err
}
stdoutBuf := bufio.NewReader(stdout)
err = execCmd.Start()
if err != nil {
return "", err
}
execPathBytes, isPrefix, err := stdoutBuf.ReadLine()
if err != nil {
return "", err
}
if isPrefix {
return "", errors.New("Couldn't read exec-path line all at once")
}
err = execCmd.Wait()
if err != nil {
return "", err
}
execPath := string(execPathBytes)
execPath = strings.TrimSpace(execPath)
cmd = filepath.Join(execPath, cmd)
// Make sure it actually exists.
_, err = exec.LookPath(cmd)
if err != nil {
return "", err
}
return cmd, nil
}
func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod,
) (common.Command, error) {
switch cmd {
case transport.UploadPackServiceName:
cmd = r.UploadPackBin
case transport.ReceivePackServiceName:
cmd = r.ReceivePackBin
}
_, err := exec.LookPath(cmd)
if err != nil {
if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
cmd, err = prefixExecPath(cmd)
if err != nil {
return nil, err
}
} else {
return nil, err
}
}
return &command{cmd: exec.Command(cmd, ep.Path)}, nil
}
type command struct {
cmd *exec.Cmd
stderrCloser io.Closer
closed bool
}
func (c *command) Start() error {
return c.cmd.Start()
}
func (c *command) StderrPipe() (io.Reader, error) {
// The pipe returned by exec.Cmd.StderrPipe races between Read and Cmd.Wait,
// so we use an io.Pipe instead and close it after the command finishes.
r, w := io.Pipe()
c.cmd.Stderr = w
c.stderrCloser = r
return r, nil
}
func (c *command) StdinPipe() (io.WriteCloser, error) {
return c.cmd.StdinPipe()
}
func (c *command) StdoutPipe() (io.Reader, error) {
return c.cmd.StdoutPipe()
}
func (c *command) Kill() error {
c.cmd.Process.Kill() // error ignored: the process may have already exited
return c.Close()
}
// Close waits for the command to exit.
func (c *command) Close() error {
if c.closed {
return nil
}
defer func() {
c.closed = true
// stderrCloser is nil if StderrPipe was never called
if c.stderrCloser != nil {
_ = c.stderrCloser.Close()
}
}()
err := c.cmd.Wait()
if _, ok := err.(*os.PathError); ok {
return nil
}
// When a repository does not exist, the command exits with code 128.
if _, ok := err.(*exec.ExitError); ok {
return nil
}
return err
}
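// Usage sketch (illustrative, assuming a local repository at the given
// path):
//
//	ep, _ := transport.NewEndpoint("/srv/git/project.git")
//	s, _ := DefaultClient.NewUploadPackSession(ep, nil)
//	defer s.Close()
//	refs, _ := s.AdvertisedReferences() // advertises the local refs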

View File

@ -0,0 +1,53 @@
package file
import (
"fmt"
"os"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
"gopkg.in/src-d/go-git.v4/plumbing/transport/server"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// ServeUploadPack serves a git-upload-pack request using the standard input,
// output and error streams. This is meant to be used when implementing a
// git-upload-pack command.
func ServeUploadPack(path string) error {
ep, err := transport.NewEndpoint(path)
if err != nil {
return err
}
// TODO: define and implement a server-side AuthMethod
s, err := server.DefaultServer.NewUploadPackSession(ep, nil)
if err != nil {
return fmt.Errorf("error creating session: %s", err)
}
return common.ServeUploadPack(srvCmd, s)
}
// ServeReceivePack serves a git-receive-pack request using the standard
// input, output and error streams. This is meant to be used when
// implementing a git-receive-pack command.
func ServeReceivePack(path string) error {
ep, err := transport.NewEndpoint(path)
if err != nil {
return err
}
// TODO: define and implement a server-side AuthMethod
s, err := server.DefaultServer.NewReceivePackSession(ep, nil)
if err != nil {
return fmt.Errorf("error creating session: %s", err)
}
return common.ServeReceivePack(srvCmd, s)
}
var srvCmd = common.ServerCommand{
Stdin: os.Stdin,
Stdout: ioutil.WriteNopCloser(os.Stdout),
Stderr: os.Stderr,
}
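// A minimal git-upload-pack replacement built on ServeUploadPack could look
// like this (a sketch, not part of this commit):
//
//	func main() {
//		if len(os.Args) < 2 {
//			fmt.Fprintln(os.Stderr, "usage: git-upload-pack <path>")
//			os.Exit(1)
//		}
//		if err := file.ServeUploadPack(os.Args[1]); err != nil {
//			fmt.Fprintln(os.Stderr, err)
//			os.Exit(128)
//		}
//	}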

View File

@ -0,0 +1,109 @@
// Package git implements the git transport protocol.
package git
import (
"fmt"
"io"
"net"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// DefaultClient is the default git client.
var DefaultClient = common.NewClient(&runner{})
const DefaultPort = 9418
type runner struct{}
// Command returns a new Command for the given cmd over the given Endpoint.
func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
// auth not allowed since git protocol doesn't support authentication
if auth != nil {
return nil, transport.ErrInvalidAuthMethod
}
c := &command{command: cmd, endpoint: ep}
if err := c.connect(); err != nil {
return nil, err
}
return c, nil
}
type command struct {
conn net.Conn
connected bool
command string
endpoint *transport.Endpoint
}
// Start executes the command, sending the required message over the TCP connection.
func (c *command) Start() error {
cmd := endpointToCommand(c.command, c.endpoint)
e := pktline.NewEncoder(c.conn)
return e.Encode([]byte(cmd))
}
func (c *command) connect() error {
if c.connected {
return transport.ErrAlreadyConnected
}
var err error
c.conn, err = net.Dial("tcp", c.getHostWithPort())
if err != nil {
return err
}
c.connected = true
return nil
}
func (c *command) getHostWithPort() string {
host := c.endpoint.Host
port := c.endpoint.Port
if port <= 0 {
port = DefaultPort
}
return fmt.Sprintf("%s:%d", host, port)
}
// StderrPipe returns a nil Reader: the git protocol has no dedicated error channel.
func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe returns the underlying connection as a WriteCloser, wrapped to
// prevent calls to Close from reaching the connection: a command execution
// over the git protocol can't be closed or killed.
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe returns the underlying connection as a Reader.
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}
func endpointToCommand(cmd string, ep *transport.Endpoint) string {
host := ep.Host
if ep.Port != DefaultPort {
host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
}
return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
}
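// For example (illustrative values), cmd "git-upload-pack", path
// "/project.git" and a non-default port 9419 on host "example.com" produce
// the pkt-line payload:
//
//	"git-upload-pack /project.git\x00host=example.com:9419\x00"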
// Close closes the TCP connection.
func (c *command) Close() error {
if !c.connected {
return nil
}
c.connected = false
return c.conn.Close()
}

View File

@ -0,0 +1,281 @@
// Package http implements the HTTP transport protocol.
package http
import (
"bytes"
"fmt"
"net"
"net/http"
"strconv"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
"gopkg.in/src-d/go-git.v4/plumbing/transport"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// applyHeadersToRequest takes a bytes.Buffer rather than an io.Reader because
// setting Content-Length requires knowing the body length up front.
func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
req.Header.Add("User-Agent", "git/1.0")
req.Header.Add("Host", host) // host:port
if content == nil {
req.Header.Add("Accept", "*/*")
return
}
req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType))
req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType))
req.Header.Add("Content-Length", strconv.Itoa(content.Len()))
}
const infoRefsPath = "/info/refs"
func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) {
url := fmt.Sprintf(
"%s%s?service=%s",
s.endpoint.String(), infoRefsPath, serviceName,
)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, err
}
s.ApplyAuthToRequest(req)
applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
res, err := s.client.Do(req)
if err != nil {
return nil, err
}
s.ModifyEndpointIfRedirect(res)
defer ioutil.CheckClose(res.Body, &err)
if err = NewErr(res); err != nil {
return nil, err
}
ar := packp.NewAdvRefs()
if err = ar.Decode(res.Body); err != nil {
if err == packp.ErrEmptyAdvRefs {
err = transport.ErrEmptyRemoteRepository
}
return nil, err
}
transport.FilterUnsupportedCapabilities(ar.Capabilities)
s.advRefs = ar
return ar, nil
}
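// For example (illustrative endpoint), an endpoint
// "https://example.com/project.git" with serviceName "git-upload-pack"
// produces the request URL:
//
//	https://example.com/project.git/info/refs?service=git-upload-pack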
type client struct {
c *http.Client
}
// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
var DefaultClient = NewClient(nil)
// NewClient creates a new client with a custom net/http client.
// See `InstallProtocol` to install and override the default http client.
// If nil is given, it falls back to `http.DefaultClient`.
//
// Note that the HTTP client cannot distinguish between private and
// nonexistent repositories on GitHub, so it returns `ErrAuthorizationRequired`
// for both.
func NewClient(c *http.Client) transport.Transport {
if c == nil {
return &client{http.DefaultClient}
}
return &client{
c: c,
}
}
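// Sketch (illustrative): installing a client with a timeout instead of
// http.DefaultClient; the `time` import and an endpoint ep are assumed.
//
//	cl := NewClient(&http.Client{Timeout: 30 * time.Second})
//	s, _ := cl.NewUploadPackSession(ep, nil) // ep is a *transport.Endpoint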
func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.UploadPackSession, error) {
return newUploadPackSession(c.c, ep, auth)
}
func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
transport.ReceivePackSession, error) {
return newReceivePackSession(c.c, ep, auth)
}
type session struct {
auth AuthMethod
client *http.Client
endpoint *transport.Endpoint
advRefs *packp.AdvRefs
}
func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
s := &session{
auth: basicAuthFromEndpoint(ep),
client: c,
endpoint: ep,
}
if auth != nil {
a, ok := auth.(AuthMethod)
if !ok {
return nil, transport.ErrInvalidAuthMethod
}
s.auth = a
}
return s, nil
}
func (s *session) ApplyAuthToRequest(req *http.Request) {
if s.auth == nil {
return
}
s.auth.setAuth(req)
}
func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
if res.Request == nil {
return
}
r := res.Request
if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
return
}
h, p, err := net.SplitHostPort(r.URL.Host)
if err != nil {
h = r.URL.Host
}
if p != "" {
port, err := strconv.Atoi(p)
if err == nil {
s.endpoint.Port = port
}
}
s.endpoint.Host = h
s.endpoint.Protocol = r.URL.Scheme
s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
}
func (*session) Close() error {
return nil
}
// AuthMethod is a concrete implementation of transport.AuthMethod for HTTP
// services.
type AuthMethod interface {
transport.AuthMethod
setAuth(r *http.Request)
}
func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
u := ep.User
if u == "" {
return nil
}
return &BasicAuth{u, ep.Password}
}
// BasicAuth represents HTTP basic authentication credentials.
type BasicAuth struct {
Username, Password string
}
func (a *BasicAuth) setAuth(r *http.Request) {
if a == nil {
return
}
r.SetBasicAuth(a.Username, a.Password)
}
// Name returns the name of the auth method.
func (a *BasicAuth) Name() string {
return "http-basic-auth"
}
func (a *BasicAuth) String() string {
masked := "*******"
if a.Password == "" {
masked = "<empty>"
}
return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
}
// TokenAuth implements an http.AuthMethod that can be used with http transport
// to authenticate with HTTP token authentication (also known as bearer
// authentication).
//
// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
// use basic HTTP authentication, with the OAuth token as user or password.
// Check the documentation of your git server for details.
type TokenAuth struct {
Token string
}
func (a *TokenAuth) setAuth(r *http.Request) {
if a == nil {
return
}
r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token))
}
// Name returns the name of the auth method.
func (a *TokenAuth) Name() string {
return "http-token-auth"
}
func (a *TokenAuth) String() string {
masked := "*******"
if a.Token == "" {
masked = "<empty>"
}
return fmt.Sprintf("%s - %s", a.Name(), masked)
}
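// Usage sketch (illustrative credentials); both types satisfy AuthMethod and
// can be passed to NewUploadPackSession or NewReceivePackSession:
//
//	basic := &BasicAuth{Username: "user", Password: "secret-or-oauth-token"}
//	bearer := &TokenAuth{Token: "opaque-bearer-token"} // true bearer-auth servers only
//	s, _ := DefaultClient.NewUploadPackSession(ep, basic)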
// Err is a dedicated error type for unexpected HTTP status codes.
type Err struct {
Response *http.Response
}
// NewErr returns an error derived from the status code of an HTTP response:
// nil for 2xx, a well-known transport error for 401, 403 and 404, and a
// plumbing.UnexpectedError wrapping an *Err otherwise.
func NewErr(r *http.Response) error {
if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
return nil
}
switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
case http.StatusForbidden:
return transport.ErrAuthorizationFailed
case http.StatusNotFound:
return transport.ErrRepositoryNotFound
}
return plumbing.NewUnexpectedError(&Err{r})
}
// StatusCode returns the status code of the response
func (e *Err) StatusCode() int {
return e.Response.StatusCode
}
func (e *Err) Error() string {
return fmt.Sprintf("unexpected status code %d requesting %q",
e.Response.StatusCode, e.Response.Request.URL,
)
}
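// Callers can branch on the sentinel errors and treat everything else as
// unexpected (a sketch):
//
//	switch err := NewErr(res); err {
//	case nil:
//		// 2xx: proceed
//	case transport.ErrRepositoryNotFound:
//		// 404
//	default:
//		// 401/403 sentinels, or a *plumbing.UnexpectedError wrapping *Err
//	}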

Some files were not shown because too many files have changed in this diff.