feat(dir2config): defaults

Mikaël Cluseau
2019-02-28 19:27:09 +11:00
parent d2b212ae6b
commit ea6fce68e1
383 changed files with 74236 additions and 41 deletions

View File

@@ -0,0 +1,476 @@
package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
var (
// DecodeVersionSupported is the range of supported index versions
DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4}
// ErrMalformedSignature is returned by Decode when the index signature is
// malformed
ErrMalformedSignature = errors.New("malformed index signature file")
// ErrInvalidChecksum is returned by Decode if the SHA1 hash does not match
// the read content
ErrInvalidChecksum = errors.New("invalid checksum")
errUnknownExtension = errors.New("unknown extension")
)
const (
entryHeaderLength = 62
entryExtended = 0x4000
entryValid = 0x8000
nameMask = 0xfff
intentToAddMask = 1 << 13
skipWorkTreeMask = 1 << 14
)
// A Decoder reads and decodes index files from an input stream.
type Decoder struct {
r io.Reader
hash hash.Hash
lastEntry *Entry
}
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
return &Decoder{
r: io.TeeReader(r, h),
hash: h,
}
}
// Decode reads the whole index object from its input and stores it in the
// value pointed to by idx.
func (d *Decoder) Decode(idx *Index) error {
var err error
idx.Version, err = validateHeader(d.r)
if err != nil {
return err
}
entryCount, err := binary.ReadUint32(d.r)
if err != nil {
return err
}
if err := d.readEntries(idx, int(entryCount)); err != nil {
return err
}
return d.readExtensions(idx)
}
func (d *Decoder) readEntries(idx *Index, count int) error {
for i := 0; i < count; i++ {
e, err := d.readEntry(idx)
if err != nil {
return err
}
d.lastEntry = e
idx.Entries = append(idx.Entries, e)
}
return nil
}
func (d *Decoder) readEntry(idx *Index) (*Entry, error) {
e := &Entry{}
var msec, mnsec, sec, nsec uint32
var flags uint16
flow := []interface{}{
&sec, &nsec,
&msec, &mnsec,
&e.Dev,
&e.Inode,
&e.Mode,
&e.UID,
&e.GID,
&e.Size,
&e.Hash,
&flags,
}
if err := binary.Read(d.r, flow...); err != nil {
return nil, err
}
read := entryHeaderLength
if sec != 0 || nsec != 0 {
e.CreatedAt = time.Unix(int64(sec), int64(nsec))
}
if msec != 0 || mnsec != 0 {
e.ModifiedAt = time.Unix(int64(msec), int64(mnsec))
}
e.Stage = Stage(flags>>12) & 0x3
if flags&entryExtended != 0 {
extended, err := binary.ReadUint16(d.r)
if err != nil {
return nil, err
}
read += 2
e.IntentToAdd = extended&intentToAddMask != 0
e.SkipWorktree = extended&skipWorkTreeMask != 0
}
if err := d.readEntryName(idx, e, flags); err != nil {
return nil, err
}
return e, d.padEntry(idx, e, read)
}
func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error {
var name string
var err error
switch idx.Version {
case 2, 3:
len := flags & nameMask
name, err = d.doReadEntryName(len)
case 4:
name, err = d.doReadEntryNameV4()
default:
return ErrUnsupportedVersion
}
if err != nil {
return err
}
e.Name = name
return nil
}
func (d *Decoder) doReadEntryNameV4() (string, error) {
l, err := binary.ReadVariableWidthInt(d.r)
if err != nil {
return "", err
}
var base string
if d.lastEntry != nil {
base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)]
}
name, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return "", err
}
return base + string(name), nil
}
func (d *Decoder) doReadEntryName(len uint16) (string, error) {
name := make([]byte, len)
if err := binary.Read(d.r, &name); err != nil {
return "", err
}
return string(name), nil
}
// Index entries are padded out to the next 8 byte alignment
// for historical reasons related to how C Git read the files.
func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error {
if idx.Version == 4 {
return nil
}
entrySize := read + len(e.Name)
padLen := 8 - entrySize%8
_, err := io.CopyN(ioutil.Discard, d.r, int64(padLen))
return err
}
func (d *Decoder) readExtensions(idx *Index) error {
// TODO: support 'Split index' and 'Untracked cache' extensions, taking into
// account that they are not supported by jgit or libgit
var expected []byte
var err error
var header [4]byte
for {
expected = d.hash.Sum(nil)
var n int
if n, err = io.ReadFull(d.r, header[:]); err != nil {
if n == 0 {
err = io.EOF
}
break
}
err = d.readExtension(idx, header[:])
if err != nil {
break
}
}
if err != errUnknownExtension {
return err
}
return d.readChecksum(expected, header)
}
func (d *Decoder) readExtension(idx *Index, header []byte) error {
switch {
case bytes.Equal(header, treeExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.Cache = &Tree{}
d := &treeExtensionDecoder{r}
if err := d.Decode(idx.Cache); err != nil {
return err
}
case bytes.Equal(header, resolveUndoExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.ResolveUndo = &ResolveUndo{}
d := &resolveUndoDecoder{r}
if err := d.Decode(idx.ResolveUndo); err != nil {
return err
}
case bytes.Equal(header, endOfIndexEntryExtSignature):
r, err := d.getExtensionReader()
if err != nil {
return err
}
idx.EndOfIndexEntry = &EndOfIndexEntry{}
d := &endOfIndexEntryDecoder{r}
if err := d.Decode(idx.EndOfIndexEntry); err != nil {
return err
}
default:
return errUnknownExtension
}
return nil
}
func (d *Decoder) getExtensionReader() (io.Reader, error) {
len, err := binary.ReadUint32(d.r)
if err != nil {
return nil, err
}
return &io.LimitedReader{R: d.r, N: int64(len)}, nil
}
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
var h plumbing.Hash
copy(h[:4], alreadyRead[:])
if err := binary.Read(d.r, h[4:]); err != nil {
return err
}
if !bytes.Equal(h[:], expected) {
return ErrInvalidChecksum
}
return nil
}
func validateHeader(r io.Reader) (version uint32, err error) {
var s = make([]byte, 4)
if _, err := io.ReadFull(r, s); err != nil {
return 0, err
}
if !bytes.Equal(s, indexSignature) {
return 0, ErrMalformedSignature
}
version, err = binary.ReadUint32(r)
if err != nil {
return 0, err
}
if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max {
return 0, ErrUnsupportedVersion
}
return
}
type treeExtensionDecoder struct {
r io.Reader
}
func (d *treeExtensionDecoder) Decode(t *Tree) error {
for {
e, err := d.readEntry()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
if e == nil {
continue
}
t.Entries = append(t.Entries, *e)
}
}
func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) {
e := &TreeEntry{}
path, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return nil, err
}
e.Path = string(path)
count, err := binary.ReadUntil(d.r, ' ')
if err != nil {
return nil, err
}
i, err := strconv.Atoi(string(count))
if err != nil {
return nil, err
}
// An entry can be in an invalidated state and is represented by having a
// negative number in the entry_count field.
if i == -1 {
return nil, nil
}
e.Entries = i
trees, err := binary.ReadUntil(d.r, '\n')
if err != nil {
return nil, err
}
i, err = strconv.Atoi(string(trees))
if err != nil {
return nil, err
}
e.Trees = i
if err := binary.Read(d.r, &e.Hash); err != nil {
return nil, err
}
return e, nil
}
type resolveUndoDecoder struct {
r io.Reader
}
func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error {
for {
e, err := d.readEntry()
if err != nil {
if err == io.EOF {
return nil
}
return err
}
ru.Entries = append(ru.Entries, *e)
}
}
func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) {
e := &ResolveUndoEntry{
Stages: make(map[Stage]plumbing.Hash),
}
path, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return nil, err
}
e.Path = string(path)
for i := 0; i < 3; i++ {
if err := d.readStage(e, Stage(i+1)); err != nil {
return nil, err
}
}
for s := range e.Stages {
var hash plumbing.Hash
if err := binary.Read(d.r, hash[:]); err != nil {
return nil, err
}
e.Stages[s] = hash
}
return e, nil
}
func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error {
ascii, err := binary.ReadUntil(d.r, '\x00')
if err != nil {
return err
}
stage, err := strconv.ParseInt(string(ascii), 8, 64)
if err != nil {
return err
}
if stage != 0 {
e.Stages[s] = plumbing.ZeroHash
}
return nil
}
type endOfIndexEntryDecoder struct {
r io.Reader
}
func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
var err error
e.Offset, err = binary.ReadUint32(d.r)
if err != nil {
return err
}
return binary.Read(d.r, &e.Hash)
}
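A minimal usage sketch for the decoder above, reading a repository's on-disk index. The import path is an assumption based on the package's own plumbing imports (adjust to wherever this package lives in the tree), and the sketch assumes the index only carries extensions handled by readExtension.

package main

import (
    "fmt"
    "os"

    "gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
    f, err := os.Open(".git/index")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    idx := &index.Index{}
    if err := index.NewDecoder(f).Decode(idx); err != nil {
        panic(err)
    }

    // Print the version and every tracked path with its blob hash.
    fmt.Println("index version:", idx.Version)
    for _, e := range idx.Entries {
        fmt.Println(e.Hash, e.Name)
    }
}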

View File

@@ -0,0 +1,360 @@
// Package index implements encoding and decoding of index format files.
//
// Git index format
// ================
//
// == The Git index file has the following format
//
// All binary numbers are in network byte order. Version 2 is described
// here unless stated otherwise.
//
// - A 12-byte header consisting of
//
// 4-byte signature:
// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
//
// 4-byte version number:
// The current supported versions are 2, 3 and 4.
//
// 32-bit number of index entries.
//
// - A number of sorted index entries (see below).
//
// - Extensions
//
// Extensions are identified by signature. Optional extensions can
// be ignored if Git does not understand them.
//
// Git currently supports cached tree and resolve undo extensions.
//
// 4-byte extension signature. If the first byte is 'A'..'Z' the
// extension is optional and can be ignored.
//
// 32-bit size of the extension
//
// Extension data
//
// - 160-bit SHA-1 over the content of the index file before this
// checksum.
//
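// For illustration only, a minimal sketch of reading the 12-byte header and
// the entry count described above with the standard encoding/binary package;
// r and hdr are hypothetical names:
//
//    var hdr struct {
//        Signature  [4]byte // 'D', 'I', 'R', 'C'
//        Version    uint32  // 2, 3 or 4
//        NumEntries uint32
//    }
//    if err := binary.Read(r, binary.BigEndian, &hdr); err != nil {
//        // handle error
//    }
//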
// == Index entry
//
// Index entries are sorted in ascending order on the name field,
// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
// localization, no special casing of directory separator '/'). Entries
// with the same name are sorted by their stage field.
//
// 32-bit ctime seconds, the last time a file's metadata changed
// this is stat(2) data
//
// 32-bit ctime nanosecond fractions
// this is stat(2) data
//
// 32-bit mtime seconds, the last time a file's data changed
// this is stat(2) data
//
// 32-bit mtime nanosecond fractions
// this is stat(2) data
//
// 32-bit dev
// this is stat(2) data
//
// 32-bit ino
// this is stat(2) data
//
// 32-bit mode, split into (high to low bits)
//
// 4-bit object type
// valid values in binary are 1000 (regular file), 1010 (symbolic link)
// and 1110 (gitlink)
//
// 3-bit unused
//
// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
// Symbolic links and gitlinks have value 0 in this field.
//
// 32-bit uid
// this is stat(2) data
//
// 32-bit gid
// this is stat(2) data
//
// 32-bit file size
// This is the on-disk size from stat(2), truncated to 32-bit.
//
// 160-bit SHA-1 for the represented object
//
// A 16-bit 'flags' field split into (high to low bits)
//
// 1-bit assume-valid flag
//
// 1-bit extended flag (must be zero in version 2)
//
// 2-bit stage (during merge)
//
// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
// is stored in this field.
//
// (Version 3 or later) A 16-bit field, only applicable if the
// "extended flag" above is 1, split into (high to low bits).
//
// 1-bit reserved for future
//
// 1-bit skip-worktree flag (used by sparse checkout)
//
// 1-bit intent-to-add flag (used by "git add -N")
//
// 13-bit unused, must be zero
//
// Entry path name (variable length) relative to top level directory
// (without leading slash). '/' is used as path separator. The special
// path components ".", ".." and ".git" (without quotes) are disallowed.
// Trailing slash is also disallowed.
//
// The exact encoding is undefined, but the '.' and '/' characters
// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
// byte (iow, this is a UNIX pathname).
//
// (Version 4) In version 4, the entry path name is prefix-compressed
// relative to the path name for the previous entry (the very first
// entry is encoded as if the path name for the previous entry is an
// empty string). At the beginning of an entry, an integer N in the
// variable width encoding (the same encoding as the offset is encoded
// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
// by a NUL-terminated string S. Removing N bytes from the end of the
// path name for the previous entry, and replacing it with the string S
// yields the path name for this entry.
//
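// As a worked example of this prefix compression (values are hypothetical):
// with a previous path of "foo/bar.txt", N = 7 and S = "baz.c", the decoded
// path is "foo/baz.c":
//
//    prev := "foo/bar.txt"
//    n, s := 7, "baz.c"
//    name := prev[:len(prev)-n] + s // "foo/" + "baz.c" == "foo/baz.c"
//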
// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
// while keeping the name NUL-terminated.
//
// (Version 4) In version 4, the padding after the pathname does not
// exist.
//
// Interpretation of index entries in split index mode is completely
// different. See below for details.
//
// == Extensions
//
// === Cached tree
//
// Cached tree extension contains pre-computed hashes for trees that can
// be derived from the index. It helps speed up tree object generation
// from index for a new commit.
//
// When a path is updated in index, the path must be invalidated and
// removed from tree cache.
//
// The signature for this extension is { 'T', 'R', 'E', 'E' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated path component (relative to its parent directory);
//
// - ASCII decimal number of entries in the index that is covered by the
// tree this entry represents (entry_count);
//
// - A space (ASCII 32);
//
// - ASCII decimal number that represents the number of subtrees this
// tree has;
//
// - A newline (ASCII 10); and
//
// - 160-bit object name for the object that would result from writing
// this span of index as a tree.
//
// An entry can be in an invalidated state and is represented by having
// a negative number in the entry_count field. In this case, there is no
// object name and the next entry starts immediately after the newline.
// When writing an invalid entry, -1 should always be used as entry_count.
//
// The entries are written out in the top-down, depth-first order. The
// first entry represents the root level of the repository, followed by the
// first subtree--let's call this A--of the root level (with its name
// relative to the root level), followed by the first subtree of A (with
// its name relative to A), ...
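//
// For illustration (hypothetical values), one valid entry as it would appear
// on disk, built with the standard bytes package; tree stands for a
// hypothetical 20-byte object name:
//
//    var buf bytes.Buffer
//    buf.WriteString("sub\x00") // NUL-terminated path component
//    buf.WriteString("2 1\n")   // entry_count, space, subtree count, newline
//    buf.Write(tree[:])         // object name of the resulting tree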
//
// === Resolve undo
//
// A conflict is represented in the index as a set of higher stage entries.
// When a conflict is resolved (e.g. with "git add path"), these higher
// stage entries will be removed and a stage-0 entry with proper resolution
// is added.
//
// When these higher stage entries are removed, they are saved in the
// resolve undo extension, so that conflicts can be recreated (e.g. with
// "git checkout -m"), in case users want to redo a conflict resolution
// from scratch.
//
// The signature for this extension is { 'R', 'E', 'U', 'C' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated pathname the entry describes (relative to the root of
// the repository, i.e. full pathname);
//
// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
// stage 1 to 3 (a missing stage is represented by "0" in this field);
// and
//
// - At most three 160-bit object names of the entry in stages from 1 to 3
// (nothing is written for a missing stage).
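//
// For illustration (hypothetical values), a fully conflicted regular file
// "file.txt" with stages 1 to 3 present could be laid out as follows; base,
// ours and theirs stand for hypothetical 20-byte object names:
//
//    var buf bytes.Buffer
//    buf.WriteString("file.txt\x00")                   // pathname
//    buf.WriteString("100644\x00100644\x00100644\x00") // octal modes, stages 1 to 3
//    buf.Write(base[:])                                 // stage-1 object name
//    buf.Write(ours[:])                                 // stage-2 object name
//    buf.Write(theirs[:])                               // stage-3 object name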
//
// === Split index
//
// In split index mode, the majority of index entries could be stored
// in a separate file. This extension records the changes to be made on
// top of that to produce the final index.
//
// The signature for this extension is { 'l', 'i', 'n', 'k' }.
//
// The extension consists of:
//
// - 160-bit SHA-1 of the shared index file. The shared index file path
// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
// index does not require a shared index file.
//
// - An ewah-encoded delete bitmap, each bit represents an entry in the
// shared index. If a bit is set, its corresponding entry in the
// shared index will be removed from the final index. Note, because
// a delete operation changes index entry positions, but we do need
// original positions in replace phase, it's best to just mark
// entries for removal, then do a mass deletion after replacement.
//
// - An ewah-encoded replace bitmap, each bit represents an entry in
// the shared index. If a bit is set, its corresponding entry in the
// shared index will be replaced with an entry in this index
// file. All replaced entries are stored in sorted order in this
// index. The first "1" bit in the replace bitmap corresponds to the
// first index entry, the second "1" bit to the second entry and so
// on. Replaced entries may have empty path names to save space.
//
// The remaining index entries after replaced ones will be added to the
// final index. These added entries are also sorted by entry name then
// stage.
//
// == Untracked cache
//
// Untracked cache saves the untracked file list and necessary data to
// verify the cache. The signature for this extension is { 'U', 'N',
// 'T', 'R' }.
//
// The extension starts with
//
// - A sequence of NUL-terminated strings, preceded by the size of the
// sequence in variable width encoding. Each string describes the
// environment where the cache can be used.
//
// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
// ctime field until "file size".
//
// - Stat data of core.excludesfile
//
// - 32-bit dir_flags (see struct dir_struct)
//
// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
// does not exist.
//
// - 160-bit SHA-1 of core.excludesfile. Null SHA-1 means the file does
// not exist.
//
// - NUL-terminated string of per-dir exclude file name. This usually
// is ".gitignore".
//
// - The number of following directory blocks, variable width
// encoding. If this number is zero, the extension ends here with a
// following NUL.
//
// - A number of directory blocks in depth-first-search order, each
// consists of
//
// - The number of untracked entries, variable width encoding.
//
// - The number of sub-directory blocks, variable width encoding.
//
// - The directory name terminated by NUL.
//
// - A number of untracked file/dir names terminated by NUL.
//
// The remaining data of each directory block is grouped by type:
//
// - An ewah bitmap, the n-th bit marks whether the n-th directory has
// valid untracked cache entries.
//
// - An ewah bitmap, the n-th bit records "check-only" bit of
// read_directory_recursive() for the n-th directory.
//
// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
// is valid for the n-th directory and exists in the next data.
//
// - An array of stat data. The n-th data corresponds with the n-th
// "one" bit in the previous ewah bitmap.
//
// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
// in the previous ewah bitmap.
//
// - One NUL.
//
// == File System Monitor cache
//
// The file system monitor cache tracks files for which the core.fsmonitor
// hook has told us about changes. The signature for this extension is
// { 'F', 'S', 'M', 'N' }.
//
// The extension starts with
//
// - 32-bit version number: the current supported version is 1.
//
// - 64-bit time: the extension data reflects all changes through the given
// time which is stored as the nanoseconds elapsed since midnight,
// January 1, 1970.
//
// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
//
// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
// is not CE_FSMONITOR_VALID.
//
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
// The signature for this extension is { 'E', 'O', 'I', 'E' }.
//
// The extension consists of:
//
// - 32-bit offset to the end of the index entries
//
// - 160-bit SHA-1 over the extension types and their sizes (but not
// their contents). E.g. if we have "TREE" extension that is N-bytes
// long, "REUC" extension that is M-bytes long, followed by "EOIE",
// then the hash would be:
//
// SHA-1("TREE" + <binary representation of N> +
// "REUC" + <binary representation of M>)
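//
// A sketch of that computation with crypto/sha1 and encoding/binary; treeLen
// and reucLen are hypothetical extension sizes, and the sizes are assumed to
// be hashed in the same 4-byte network byte order used in the extension
// headers:
//
//    h := sha1.New()
//    h.Write([]byte("TREE"))
//    binary.Write(h, binary.BigEndian, uint32(treeLen))
//    h.Write([]byte("REUC"))
//    binary.Write(h, binary.BigEndian, uint32(reucLen))
//    sum := h.Sum(nil) // the 160-bit value stored in the EOIE extension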
//
// == Index Entry Offset Table
//
// The Index Entry Offset Table (IEOT) is used to help address the CPU
// cost of loading the index by enabling multi-threading the process of
// converting cache entries from the on-disk format to the in-memory format.
// The signature for this extension is { 'I', 'E', 'O', 'T' }.
//
// The extension consists of:
//
// - 32-bit version (currently 1)
//
// - A number of index offset entries each consisting of:
//
// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this block
package index

View File

@@ -0,0 +1,150 @@
package index
import (
"bytes"
"crypto/sha1"
"errors"
"hash"
"io"
"sort"
"time"
"gopkg.in/src-d/go-git.v4/utils/binary"
)
var (
// EncodeVersionSupported is the index version supported by the encoder
EncodeVersionSupported uint32 = 2
// ErrInvalidTimestamp is returned by Encode if an Index contains an Entry
// with negative timestamp values
ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
)
// An Encoder writes an Index to an output stream.
type Encoder struct {
w io.Writer
hash hash.Hash
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
return ErrUnsupportedVersion
}
if err := e.encodeHeader(idx); err != nil {
return err
}
if err := e.encodeEntries(idx); err != nil {
return err
}
return e.encodeFooter()
}
func (e *Encoder) encodeHeader(idx *Index) error {
return binary.Write(e.w,
indexSignature,
idx.Version,
uint32(len(idx.Entries)),
)
}
func (e *Encoder) encodeEntries(idx *Index) error {
sort.Sort(byName(idx.Entries))
for _, entry := range idx.Entries {
if err := e.encodeEntry(entry); err != nil {
return err
}
wrote := entryHeaderLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
}
return nil
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
}
msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt)
if err != nil {
return err
}
flags := uint16(entry.Stage&0x3) << 12
if l := len(entry.Name); l < nameMask {
flags |= uint16(l)
} else {
flags |= nameMask
}
flow := []interface{}{
sec, nsec,
msec, mnsec,
entry.Dev,
entry.Inode,
entry.Mode,
entry.UID,
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
if err := binary.Write(e.w, flow...); err != nil {
return err
}
return binary.Write(e.w, []byte(entry.Name))
}
func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
if t.IsZero() {
return 0, 0, nil
}
if t.Unix() < 0 || t.UnixNano() < 0 {
return 0, 0, ErrInvalidTimestamp
}
return uint32(t.Unix()), uint32(t.Nanosecond()), nil
}
func (e *Encoder) padEntry(wrote int) error {
padLen := 8 - wrote%8
_, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
return err
}
func (e *Encoder) encodeFooter() error {
return binary.Write(e.w, e.hash.Sum(nil))
}
type byName []*Entry
func (l byName) Len() int { return len(l) }
func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name }
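A small round-trip sketch for the encoder above; the import paths are assumed as in the decoder example, plumbing.NewHash and filemode.Regular come from the imported go-git packages, and the blob hash is only a sample value.

package main

import (
    "bytes"
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/filemode"
    "gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
    idx := &index.Index{Version: 2}

    e := idx.Add("hello.txt")
    e.Hash = plumbing.NewHash("ce013625030ba8dba906f756967f9e9ca394464a")
    e.Mode = filemode.Regular

    var buf bytes.Buffer
    if err := index.NewEncoder(&buf).Encode(idx); err != nil {
        panic(err)
    }

    // Decode what was just written to confirm the round trip.
    out := &index.Index{}
    if err := index.NewDecoder(&buf).Decode(out); err != nil {
        panic(err)
    }
    fmt.Println(out.Version, out.Entries[0].Name) // 2 hello.txt
}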

View File

@@ -0,0 +1,213 @@
package index
import (
"bytes"
"errors"
"fmt"
"path/filepath"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)
var (
// ErrUnsupportedVersion is returned by Decode when the index file version
// is not supported.
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
ErrEntryNotFound = errors.New("entry not found")
indexSignature = []byte{'D', 'I', 'R', 'C'}
treeExtSignature = []byte{'T', 'R', 'E', 'E'}
resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
)
// Stage during merge
type Stage int
const (
// Merged is the default stage, fully merged
Merged Stage = 1
// AncestorMode is the base revision
AncestorMode Stage = 1
// OurMode is the first tree revision, ours
OurMode Stage = 2
// TheirMode is the second tree revision, theirs
TheirMode Stage = 3
)
// Index contains information about which objects are currently checked out
// in the worktree and about the working files. Changes in the worktree are
// detected using this Index. The Index is also used during merges.
type Index struct {
// Version is index version
Version uint32
// Entries is the collection of entries represented by this Index. The order
// of this collection is not guaranteed
Entries []*Entry
// Cache represents the 'Cached tree' extension
Cache *Tree
// ResolveUndo represents the 'Resolve undo' extension
ResolveUndo *ResolveUndo
// EndOfIndexEntry represents the 'End of Index Entry' extension
EndOfIndexEntry *EndOfIndexEntry
}
// Add creates a new Entry and returns it. The caller should first check that
// another entry with the same path does not exist.
func (i *Index) Add(path string) *Entry {
e := &Entry{
Name: filepath.ToSlash(path),
}
i.Entries = append(i.Entries, e)
return e
}
// Entry returns the entry that matches the given path, if any.
func (i *Index) Entry(path string) (*Entry, error) {
path = filepath.ToSlash(path)
for _, e := range i.Entries {
if e.Name == path {
return e, nil
}
}
return nil, ErrEntryNotFound
}
// Remove removes the entry that matches the given path and returns the deleted entry.
func (i *Index) Remove(path string) (*Entry, error) {
path = filepath.ToSlash(path)
for index, e := range i.Entries {
if e.Name == path {
i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
return e, nil
}
}
return nil, ErrEntryNotFound
}
// Glob returns all entries matching pattern or nil if there is no matching
// entry. The syntax of patterns is the same as in filepath.Glob.
func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
pattern = filepath.ToSlash(pattern)
for _, e := range i.Entries {
m, err := match(pattern, e.Name)
if err != nil {
return nil, err
}
if m {
matches = append(matches, e)
}
}
return
}
// String is equivalent to `git ls-files --stage --debug`
func (i *Index) String() string {
buf := bytes.NewBuffer(nil)
for _, e := range i.Entries {
buf.WriteString(e.String())
}
return buf.String()
}
// Entry represents a single file (or stage of a file) in the cache. An entry
// represents exactly one stage of a file. If a file path is unmerged then
// multiple Entry instances may appear for the same path name.
type Entry struct {
// Hash is the SHA1 of the represented file
Hash plumbing.Hash
// Name is the Entry path name relative to top level directory
Name string
// CreatedAt time when the tracked path was created
CreatedAt time.Time
// ModifiedAt time when the tracked path was changed
ModifiedAt time.Time
// Dev and Inode of the tracked path
Dev, Inode uint32
// Mode of the path
Mode filemode.FileMode
// UID and GID, userid and group id of the owner
UID, GID uint32
// Size is the length in bytes for regular files
Size uint32
// Stage, during a merge, defines which stage this entry represents
// https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
Stage Stage
// SkipWorktree used in sparse checkouts
// https://git-scm.com/docs/git-read-tree#_sparse_checkout
SkipWorktree bool
// IntentToAdd records only the fact that the path will be added later
// https://git-scm.com/docs/git-add ("git add -N")
IntentToAdd bool
}
func (e Entry) String() string {
buf := bytes.NewBuffer(nil)
fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name)
fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond())
fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond())
fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode)
fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID)
fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0)
return buf.String()
}
// Tree contains pre-computed hashes for trees that can be derived from the
// index. It helps speed up tree object generation from index for a new commit.
type Tree struct {
Entries []TreeEntry
}
// TreeEntry entry of a cached Tree
type TreeEntry struct {
// Path component (relative to its parent directory)
Path string
// Entries is the number of entries in the index that is covered by the tree
// this entry represents.
Entries int
// Trees is the number of subtrees this tree has
Trees int
// Hash object name for the object that would result from writing this span
// of index as a tree.
Hash plumbing.Hash
}
// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"):
// the higher stage entries are removed and a stage-0 entry with the proper
// resolution is added. When those higher stage entries are removed, they are
// saved in the resolve undo extension.
type ResolveUndo struct {
Entries []ResolveUndoEntry
}
// ResolveUndoEntry contains the information about a conflict when it is resolved
type ResolveUndoEntry struct {
Path string
Stages map[Stage]plumbing.Hash
}
// EndOfIndexEntry is the 'End of Index Entry' (EOIE) extension, used to locate
// the end of the variable length index entries and the beginning of the
// extensions. Code can take advantage of this to quickly locate the index
// extensions without having to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
type EndOfIndexEntry struct {
// Offset to the end of the index entries
Offset uint32
// Hash is a SHA-1 over the extension types and their sizes (but not
// their contents).
Hash plumbing.Hash
}
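A short sketch of the Index helpers above (Add, Entry, Glob, Remove); the import path is assumed as in the earlier examples.

package main

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
    idx := &index.Index{Version: 2}
    idx.Add("cmd/main.go")
    idx.Add("docs/README.md")

    // Exact lookup by slash-separated path.
    if e, err := idx.Entry("cmd/main.go"); err == nil {
        fmt.Println("found:", e.Name)
    }

    // Pattern lookup; the syntax is that of filepath.Match, applied to the
    // full entry name.
    matches, err := idx.Glob("cmd/*")
    if err != nil {
        panic(err)
    }
    for _, e := range matches {
        fmt.Println("matched:", e.Name)
    }

    // Remove deletes the entry and returns it.
    if _, err := idx.Remove("docs/README.md"); err != nil {
        panic(err)
    }
}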

View File

@@ -0,0 +1,186 @@
package index
import (
"path/filepath"
"runtime"
"unicode/utf8"
)
// match is filepath.Match with support for matching full paths, not only filenames
// code from:
// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224
func match(pattern, name string) (matched bool, err error) {
Pattern:
for len(pattern) > 0 {
var star bool
var chunk string
star, chunk, pattern = scanChunk(pattern)
// Look for match at current position.
t, ok, err := matchChunk(chunk, name)
// if we're the last chunk, make sure we've exhausted the name
// otherwise we'll give a false result even if we could still match
// using the star
if ok && (len(t) == 0 || len(pattern) > 0) {
name = t
continue
}
if err != nil {
return false, err
}
if star {
// Look for match skipping i+1 bytes.
// Unlike filepath.Match, '/' can be skipped here, so '*' also
// matches across path separators.
for i := 0; i < len(name); i++ {
t, ok, err := matchChunk(chunk, name[i+1:])
if ok {
// if we're the last chunk, make sure we exhausted the name
if len(pattern) == 0 && len(t) > 0 {
continue
}
name = t
continue Pattern
}
if err != nil {
return false, err
}
}
}
return false, nil
}
return len(name) == 0, nil
}
// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
for len(pattern) > 0 && pattern[0] == '*' {
pattern = pattern[1:]
star = true
}
inrange := false
var i int
Scan:
for i = 0; i < len(pattern); i++ {
switch pattern[i] {
case '\\':
if runtime.GOOS != "windows" {
// error check handled in matchChunk: bad pattern.
if i+1 < len(pattern) {
i++
}
}
case '[':
inrange = true
case ']':
inrange = false
case '*':
if !inrange {
break Scan
}
}
}
return star, pattern[0:i], pattern[i:]
}
// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
for len(chunk) > 0 {
if len(s) == 0 {
return
}
switch chunk[0] {
case '[':
// character class
r, n := utf8.DecodeRuneInString(s)
s = s[n:]
chunk = chunk[1:]
// We can't end right after '[', we're expecting at least
// a closing bracket and possibly a caret.
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
// possibly negated
negated := chunk[0] == '^'
if negated {
chunk = chunk[1:]
}
// parse all ranges
match := false
nrange := 0
for {
if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
chunk = chunk[1:]
break
}
var lo, hi rune
if lo, chunk, err = getEsc(chunk); err != nil {
return
}
hi = lo
if chunk[0] == '-' {
if hi, chunk, err = getEsc(chunk[1:]); err != nil {
return
}
}
if lo <= r && r <= hi {
match = true
}
nrange++
}
if match == negated {
return
}
case '?':
_, n := utf8.DecodeRuneInString(s)
s = s[n:]
chunk = chunk[1:]
case '\\':
if runtime.GOOS != "windows" {
chunk = chunk[1:]
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
}
fallthrough
default:
if chunk[0] != s[0] {
return
}
s = s[1:]
chunk = chunk[1:]
}
}
return s, true, nil
}
// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
err = filepath.ErrBadPattern
return
}
if chunk[0] == '\\' && runtime.GOOS != "windows" {
chunk = chunk[1:]
if len(chunk) == 0 {
err = filepath.ErrBadPattern
return
}
}
r, n := utf8.DecodeRuneInString(chunk)
if r == utf8.RuneError && n == 1 {
err = filepath.ErrBadPattern
}
nchunk = chunk[n:]
if len(nchunk) == 0 {
err = filepath.ErrBadPattern
}
return
}
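A within-package test sketch (hypothetical, names chosen for illustration) of the behaviour that distinguishes match from filepath.Match: because the star-skipping loop above does not stop at '/', a '*' can span path separators.

package index

import "testing"

func TestMatchSpansSeparators(t *testing.T) {
    ok, err := match("plumbing/*", "plumbing/format/index/match.go")
    if err != nil {
        t.Fatal(err)
    }
    if !ok {
        t.Fatal("expected '*' to match across '/'")
    }
}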