feat(dir2config): defaults
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go (generated, vendored, new file, 68 lines)
@@ -0,0 +1,68 @@
package packfile

import (
    "bytes"
    "io"
    "sync"

    "gopkg.in/src-d/go-git.v4/plumbing/storer"
    "gopkg.in/src-d/go-git.v4/utils/ioutil"
)

var signature = []byte{'P', 'A', 'C', 'K'}

const (
    // VersionSupported is the packfile version supported by this package
    VersionSupported uint32 = 2

    firstLengthBits = uint8(4)   // the first byte of an object header has 4 bits to store the length
    lengthBits      = uint8(7)   // subsequent bytes have 7 bits to store the length
    maskFirstLength = 15         // 0000 1111
    maskContinue    = 0x80       // 1000 0000
    maskLength      = uint8(127) // 0111 1111
    maskType        = uint8(112) // 0111 0000
)

// UpdateObjectStorage updates the storer with the objects in the given
// packfile.
func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
    if pw, ok := s.(storer.PackfileWriter); ok {
        return WritePackfileToObjectStorage(pw, packfile)
    }

    p, err := NewParserWithStorage(NewScanner(packfile), s)
    if err != nil {
        return err
    }

    _, err = p.Parse()
    return err
}

// WritePackfileToObjectStorage writes all the packfile objects into the given
// object storage.
func WritePackfileToObjectStorage(
    sw storer.PackfileWriter,
    packfile io.Reader,
) (err error) {
    w, err := sw.PackfileWriter()
    if err != nil {
        return err
    }

    defer ioutil.CheckClose(w, &err)

    var n int64
    n, err = io.Copy(w, packfile)
    if err == nil && n == 0 {
        return ErrEmptyPackfile
    }

    return err
}

var bufPool = sync.Pool{
    New: func() interface{} {
        return bytes.NewBuffer(nil)
    },
}
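Reviewer note: a minimal usage sketch for the UpdateObjectStorage entry point above (not part of this commit). It assumes the in-memory storer from gopkg.in/src-d/go-git.v4/storage/memory, which does not implement storer.PackfileWriter, so this exercises the parser path rather than WritePackfileToObjectStorage; the pack path is hypothetical.

package main

import (
    "os"

    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
    "gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
    // Hypothetical pack file on disk.
    f, err := os.Open("pack-0123.pack")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // memory.Storage has no PackfileWriter, so the objects are parsed
    // out of the pack and stored individually.
    if err := packfile.UpdateObjectStorage(memory.NewStorage(), f); err != nil {
        panic(err)
    }
}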
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go (generated, vendored, new file, 297 lines)
@@ -0,0 +1,297 @@
package packfile

const blksz = 16
const maxChainLength = 64

// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current
// design.
type deltaIndex struct {
    table   []int
    entries []int
    mask    int
}

func (idx *deltaIndex) init(buf []byte) {
    scanner := newDeltaIndexScanner(buf, len(buf))
    idx.mask = scanner.mask
    idx.table = scanner.table
    idx.entries = make([]int, countEntries(scanner)+1)
    idx.copyEntries(scanner)
}

// findMatch returns the offset of src where the block starting at tgtOffset
// is and the length of the match. A length of 0 means there was no match. A
// length of -1 means the src length is lower than the blksz; any other
// positive length is the length of the match in bytes.
func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) {
    if len(tgt) < tgtOffset+s {
        return 0, len(tgt) - tgtOffset
    }

    if len(src) < blksz {
        return 0, -1
    }

    if len(tgt) >= tgtOffset+s && len(src) >= blksz {
        h := hashBlock(tgt, tgtOffset)
        tIdx := h & idx.mask
        eIdx := idx.table[tIdx]
        if eIdx != 0 {
            srcOffset = idx.entries[eIdx]
        } else {
            return
        }

        l = matchLength(src, tgt, tgtOffset, srcOffset)
    }

    return
}

func matchLength(src, tgt []byte, otgt, osrc int) (l int) {
    lensrc := len(src)
    lentgt := len(tgt)
    for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] {
        l++
        osrc++
        otgt++
    }
    return
}

func countEntries(scan *deltaIndexScanner) (cnt int) {
    // Figure out exactly how many entries we need. As we do the
    // enumeration truncate any delta chains longer than what we
    // are willing to scan during encode. This keeps the encode
    // logic linear in the size of the input rather than quadratic.
    for i := 0; i < len(scan.table); i++ {
        h := scan.table[i]
        if h == 0 {
            continue
        }

        size := 0
        for {
            size++
            if size == maxChainLength {
                scan.next[h] = 0
                break
            }
            h = scan.next[h]

            if h == 0 {
                break
            }
        }
        cnt += size
    }

    return
}

func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) {
    // Rebuild the entries list from the scanner, positioning all
    // blocks in the same hash chain next to each other. We can
    // then later discard the next list, along with the scanner.
    //
    next := 1
    for i := 0; i < len(idx.table); i++ {
        h := idx.table[i]
        if h == 0 {
            continue
        }

        idx.table[i] = next
        for {
            idx.entries[next] = scanner.entries[h]
            next++
            h = scanner.next[h]

            if h == 0 {
                break
            }
        }
    }
}

type deltaIndexScanner struct {
    table   []int
    entries []int
    next    []int
    mask    int
    count   int
}

func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner {
    size -= size % blksz
    worstCaseBlockCnt := size / blksz
    if worstCaseBlockCnt < 1 {
        return new(deltaIndexScanner)
    }

    tableSize := tableSize(worstCaseBlockCnt)
    scanner := &deltaIndexScanner{
        table:   make([]int, tableSize),
        mask:    tableSize - 1,
        entries: make([]int, worstCaseBlockCnt+1),
        next:    make([]int, worstCaseBlockCnt+1),
    }

    scanner.scan(buf, size)
    return scanner
}

// Slightly modified version of JGit's DeltaIndexScanner. We store the offset
// in the entries instead of the entry and the key, so we avoid operations to
// retrieve the offset later, as we don't use the key.
// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java
func (s *deltaIndexScanner) scan(buf []byte, end int) {
    lastHash := 0
    ptr := end - blksz

    for {
        key := hashBlock(buf, ptr)
        tIdx := key & s.mask
        head := s.table[tIdx]
        if head != 0 && lastHash == key {
            s.entries[head] = ptr
        } else {
            s.count++
            eIdx := s.count
            s.entries[eIdx] = ptr
            s.next[eIdx] = head
            s.table[tIdx] = eIdx
        }

        lastHash = key
        ptr -= blksz

        if 0 > ptr {
            break
        }
    }
}

func tableSize(worstCaseBlockCnt int) int {
    shift := 32 - leadingZeros(uint32(worstCaseBlockCnt))
    sz := 1 << uint(shift-1)
    if sz < worstCaseBlockCnt {
        sz <<= 1
    }
    return sz
}

// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future
func leadingZeros(x uint32) (n int) {
    if x >= 1<<16 {
        x >>= 16
        n = 16
    }
    if x >= 1<<8 {
        x >>= 8
        n += 8
    }
    n += int(len8tab[x])
    return 32 - n
}

var len8tab = [256]uint8{
    0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04,
    0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
    0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
}

func hashBlock(raw []byte, ptr int) int {
    // The first 4 steps collapse out into a 4 byte big-endian decode,
    // with a larger right shift as we combined shift lefts together.
    //
    hash := ((uint32(raw[ptr]) & 0xff) << 24) |
        ((uint32(raw[ptr+1]) & 0xff) << 16) |
        ((uint32(raw[ptr+2]) & 0xff) << 8) |
        (uint32(raw[ptr+3]) & 0xff)
    hash ^= T[hash>>31]

    hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23]

    hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23]

    hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23]
    hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23]

    return int(hash)
}

var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577,
    0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99,
    0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45,
    0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c,
    0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895,
    0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd,
    0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f,
    0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181,
    0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e,
    0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770,
    0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d,
    0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5,
    0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c,
    0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084,
    0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558,
    0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6,
    0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788,
    0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66,
    0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba,
    0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c,
    0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105,
    0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d,
    0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990,
    0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e,
    0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61,
    0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f,
    0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f,
    0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17,
    0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e,
    0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7,
    0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b,
    0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5,
    0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4,
    0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a,
    0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96,
    0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df,
    0x20067785, 0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46,
    0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e,
    0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62,
    0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c,
    0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93,
    0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d,
    0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680,
    0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8,
    0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071,
    0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657,
    0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b,
    0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965,
    0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b,
    0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5,
    0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69,
    0xe4fe0d44, 0x4d736b1e, 0x99b5d833,
}
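Reviewer note: the leadingZeros helper and its lookup table exist only because this code predates math/bits, as its own comment concedes. A small sketch (not part of this commit) showing that tableSize just rounds the block count up to the next power of two, so that mask = tableSize - 1 works as a cheap modulo:

package main

import (
    "fmt"
    "math/bits"
)

// nextPow2 mirrors tableSize above: the smallest power of two >= n,
// with bits.Len32 standing in for the manual leadingZeros helper.
func nextPow2(n int) int {
    sz := 1 << uint(bits.Len32(uint32(n))-1)
    if sz < n {
        sz <<= 1
    }
    return sz
}

func main() {
    for _, n := range []int{1, 4, 5, 1000} {
        fmt.Println(n, "->", nextPow2(n)) // 1->1, 4->4, 5->8, 1000->1024
    }
}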
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go (generated, vendored, new file, 369 lines)
@@ -0,0 +1,369 @@
package packfile

import (
    "sort"
    "sync"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
)

const (
    // deltas based on deltas, how many steps we can do.
    // 50 is the default value used in JGit
    maxDepth = int64(50)
)

// applyDelta is the set of object types to which we should apply deltas
var applyDelta = map[plumbing.ObjectType]bool{
    plumbing.BlobObject: true,
    plumbing.TreeObject: true,
}

type deltaSelector struct {
    storer storer.EncodedObjectStorer
}

func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector {
    return &deltaSelector{s}
}

// ObjectsToPack creates a list of ObjectToPack from the hashes
// provided, creating deltas when suitable, using a specific
// internal logic. `packWindow` specifies the size of the sliding
// window used to compare objects for delta compression; 0 turns off
// delta compression entirely.
func (dw *deltaSelector) ObjectsToPack(
    hashes []plumbing.Hash,
    packWindow uint,
) ([]*ObjectToPack, error) {
    otp, err := dw.objectsToPack(hashes, packWindow)
    if err != nil {
        return nil, err
    }

    if packWindow == 0 {
        return otp, nil
    }

    dw.sort(otp)

    var objectGroups [][]*ObjectToPack
    var prev *ObjectToPack
    i := -1
    for _, obj := range otp {
        if prev == nil || prev.Type() != obj.Type() {
            objectGroups = append(objectGroups, []*ObjectToPack{obj})
            i++
            prev = obj
        } else {
            objectGroups[i] = append(objectGroups[i], obj)
        }
    }

    var wg sync.WaitGroup
    var once sync.Once
    for _, objs := range objectGroups {
        objs := objs
        wg.Add(1)
        go func() {
            if walkErr := dw.walk(objs, packWindow); walkErr != nil {
                once.Do(func() {
                    err = walkErr
                })
            }
            wg.Done()
        }()
    }
    wg.Wait()

    if err != nil {
        return nil, err
    }

    return otp, nil
}

func (dw *deltaSelector) objectsToPack(
    hashes []plumbing.Hash,
    packWindow uint,
) ([]*ObjectToPack, error) {
    var objectsToPack []*ObjectToPack
    for _, h := range hashes {
        var o plumbing.EncodedObject
        var err error
        if packWindow == 0 {
            o, err = dw.encodedObject(h)
        } else {
            o, err = dw.encodedDeltaObject(h)
        }
        if err != nil {
            return nil, err
        }

        otp := newObjectToPack(o)
        if _, ok := o.(plumbing.DeltaObject); ok {
            otp.CleanOriginal()
        }

        objectsToPack = append(objectsToPack, otp)
    }

    if packWindow == 0 {
        return objectsToPack, nil
    }

    if err := dw.fixAndBreakChains(objectsToPack); err != nil {
        return nil, err
    }

    return objectsToPack, nil
}

func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
    edos, ok := dw.storer.(storer.DeltaObjectStorer)
    if !ok {
        return dw.encodedObject(h)
    }

    return edos.DeltaObject(plumbing.AnyObject, h)
}

func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
    return dw.storer.EncodedObject(plumbing.AnyObject, h)
}

func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error {
    m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack))
    for _, otp := range objectsToPack {
        m[otp.Hash()] = otp
    }

    for _, otp := range objectsToPack {
        if err := dw.fixAndBreakChainsOne(m, otp); err != nil {
            return err
        }
    }

    return nil
}

func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error {
    if !otp.Object.Type().IsDelta() {
        return nil
    }

    // Initial ObjectToPack instances might have a delta assigned to Object
    // but no actual base initially. Once Base is assigned to a delta, it means
    // we already fixed it.
    if otp.Base != nil {
        return nil
    }

    do, ok := otp.Object.(plumbing.DeltaObject)
    if !ok {
        // if this is not a DeltaObject, then we cannot retrieve its base,
        // so we have to break the delta chain here.
        return dw.undeltify(otp)
    }

    base, ok := objectsToPack[do.BaseHash()]
    if !ok {
        // The base of the delta is not in our list of objects to pack, so
        // we break the chain.
        return dw.undeltify(otp)
    }

    if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
        return err
    }

    otp.SetDelta(base, otp.Object)
    return nil
}

func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
    if otp.Original != nil {
        return nil
    }

    if !otp.Object.Type().IsDelta() {
        return nil
    }

    obj, err := dw.encodedObject(otp.Hash())
    if err != nil {
        return err
    }

    otp.SetOriginal(obj)

    return nil
}

// undeltify undeltifies an *ObjectToPack by retrieving the original object from
// the storer and resetting it.
func (dw *deltaSelector) undeltify(otp *ObjectToPack) error {
    if err := dw.restoreOriginal(otp); err != nil {
        return err
    }

    otp.Object = otp.Original
    otp.Depth = 0
    return nil
}

func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) {
    sort.Sort(byTypeAndSize(objectsToPack))
}

func (dw *deltaSelector) walk(
    objectsToPack []*ObjectToPack,
    packWindow uint,
) error {
    indexMap := make(map[plumbing.Hash]*deltaIndex)
    for i := 0; i < len(objectsToPack); i++ {
        // Clean up the index map and reconstructed delta objects for anything
        // outside our pack window, to save memory.
        if i > int(packWindow) {
            obj := objectsToPack[i-int(packWindow)]

            delete(indexMap, obj.Hash())

            if obj.IsDelta() {
                obj.SaveOriginalMetadata()
                obj.CleanOriginal()
            }
        }

        target := objectsToPack[i]

        // If we already have a delta, we don't try to find a new one for this
        // object. This happens when a delta is set to be reused from an existing
        // packfile.
        if target.IsDelta() {
            continue
        }

        // We only want to create deltas from specific types.
        if !applyDelta[target.Type()] {
            continue
        }

        for j := i - 1; j >= 0 && i-j < int(packWindow); j-- {
            base := objectsToPack[j]
            // Objects must use only the same type as their delta base.
            // Since objectsToPack is sorted by type and size, once we find
            // a different type, we know we won't find more of them.
            if base.Type() != target.Type() {
                break
            }

            if err := dw.tryToDeltify(indexMap, base, target); err != nil {
                return err
            }
        }
    }

    return nil
}

func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error {
    // The original object might not be present if we're reusing a delta, so we
    // ensure it is restored.
    if err := dw.restoreOriginal(target); err != nil {
        return err
    }

    if err := dw.restoreOriginal(base); err != nil {
        return err
    }

    // If the sizes are radically different, this is a bad pairing.
    if target.Size() < base.Size()>>4 {
        return nil
    }

    msz := dw.deltaSizeLimit(
        target.Object.Size(),
        base.Depth,
        target.Depth,
        target.IsDelta(),
    )

    // Nearly impossible to fit a useful delta.
    if msz <= 8 {
        return nil
    }

    // If we have to insert a lot to make this work, find another.
    if base.Size()-target.Size() > msz {
        return nil
    }

    if _, ok := indexMap[base.Hash()]; !ok {
        indexMap[base.Hash()] = new(deltaIndex)
    }

    // Now we can generate the delta using originals
    delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original)
    if err != nil {
        return err
    }

    // if the delta is better than the target
    if delta.Size() < msz {
        target.SetDelta(base, delta)
    }

    return nil
}

func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int,
    targetDepth int, targetDelta bool) int64 {
    if !targetDelta {
        // Any delta should be no more than 50% of the original size
        // (for text files deflate of whole form should shrink 50%).
        n := targetSize >> 1

        // Evenly distribute delta size limits over allowed depth.
        // If src is non-delta (depth = 0), delta <= 50% of original.
        // If src is almost at limit (9/10), delta <= 10% of original.
        return n * (maxDepth - int64(baseDepth)) / maxDepth
    }

    // With a delta base chosen any new delta must be "better".
    // Retain the distribution described above.
    d := int64(targetDepth)
    n := targetSize

    // If target depth is bigger than maxDepth, this delta is not suitable to be used.
    if d >= maxDepth {
        return 0
    }

    // If src is whole (depth=0) and base is near limit (depth=9/10)
    // any delta using src can be 10x larger and still be better.
    //
    // If src is near limit (depth=9/10) and base is whole (depth=0)
    // a new delta dependent on src must be 1/10th the size.
    return n * (maxDepth - int64(baseDepth)) / (maxDepth - d)
}

type byTypeAndSize []*ObjectToPack

func (a byTypeAndSize) Len() int { return len(a) }

func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

func (a byTypeAndSize) Less(i, j int) bool {
    if a[i].Type() < a[j].Type() {
        return false
    }

    if a[i].Type() > a[j].Type() {
        return true
    }

    return a[i].Size() > a[j].Size()
}
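Reviewer note: the budget arithmetic in deltaSizeLimit is easier to see with concrete numbers. A standalone sketch (not part of this commit) reproducing the formula above; the sizes and depths are illustrative only.

package main

import "fmt"

const maxDepth = int64(50)

// deltaSizeLimit copied from delta_selector.go above, for a worked check.
func deltaSizeLimit(targetSize int64, baseDepth, targetDepth int, targetDelta bool) int64 {
    if !targetDelta {
        n := targetSize >> 1
        return n * (maxDepth - int64(baseDepth)) / maxDepth
    }
    d := int64(targetDepth)
    if d >= maxDepth {
        return 0
    }
    return targetSize * (maxDepth - int64(baseDepth)) / (maxDepth - d)
}

func main() {
    // Non-delta target of 10 KiB: the budget shrinks linearly with base depth.
    fmt.Println(deltaSizeLimit(10240, 0, 0, false))  // 5120: 50% of the original
    fmt.Println(deltaSizeLimit(10240, 45, 0, false)) // 512: base near the chain limit

    // Target already deltified at depth 45: a shallow base buys a 10x budget.
    fmt.Println(deltaSizeLimit(10240, 0, 45, true)) // 102400
}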
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go (generated, vendored, new file, 201 lines)
@@ -0,0 +1,201 @@
package packfile

import (
    "bytes"

    "gopkg.in/src-d/go-git.v4/plumbing"
)

// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for more info

const (
    // Standard chunk size used to generate fingerprints
    s = 16

    // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428
    // Max size of a copy operation (64KB)
    maxCopySize = 64 * 1024
)

// GetDelta returns an EncodedObject of type OFSDeltaObject. The base and
// target objects will be loaded into memory to be able to create the delta
// object. To regenerate the target, you will need both the obtained delta
// object and the "base" one.
// An error will be returned if the base or target object cannot be read.
func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
    return getDelta(new(deltaIndex), base, target)
}

func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) {
    br, err := base.Reader()
    if err != nil {
        return nil, err
    }
    defer br.Close()
    tr, err := target.Reader()
    if err != nil {
        return nil, err
    }
    defer tr.Close()

    bb := bufPool.Get().(*bytes.Buffer)
    bb.Reset()
    defer bufPool.Put(bb)

    _, err = bb.ReadFrom(br)
    if err != nil {
        return nil, err
    }

    tb := bufPool.Get().(*bytes.Buffer)
    tb.Reset()
    defer bufPool.Put(tb)

    _, err = tb.ReadFrom(tr)
    if err != nil {
        return nil, err
    }

    db := diffDelta(index, bb.Bytes(), tb.Bytes())
    delta := &plumbing.MemoryObject{}
    _, err = delta.Write(db)
    if err != nil {
        return nil, err
    }

    delta.SetSize(int64(len(db)))
    delta.SetType(plumbing.OFSDeltaObject)

    return delta, nil
}

// DiffDelta returns the delta that transforms src into tgt.
func DiffDelta(src, tgt []byte) []byte {
    return diffDelta(new(deltaIndex), src, tgt)
}

func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
    buf := bufPool.Get().(*bytes.Buffer)
    buf.Reset()
    buf.Write(deltaEncodeSize(len(src)))
    buf.Write(deltaEncodeSize(len(tgt)))

    if len(index.entries) == 0 {
        index.init(src)
    }

    ibuf := bufPool.Get().(*bytes.Buffer)
    ibuf.Reset()
    for i := 0; i < len(tgt); i++ {
        offset, l := index.findMatch(src, tgt, i)

        if l == 0 {
            // couldn't find a match, just write the current byte and continue
            ibuf.WriteByte(tgt[i])
        } else if l < 0 {
            // src is less than blksz, copy the rest of the target to avoid
            // calls to findMatch
            for ; i < len(tgt); i++ {
                ibuf.WriteByte(tgt[i])
            }
        } else if l < s {
            // the remaining target is less than blksz, copy what's left of it
            // and avoid calls to findMatch
            for j := i; j < i+l; j++ {
                ibuf.WriteByte(tgt[j])
            }
            i += l - 1
        } else {
            encodeInsertOperation(ibuf, buf)

            rl := l
            aOffset := offset
            for rl > 0 {
                if rl < maxCopySize {
                    buf.Write(encodeCopyOperation(aOffset, rl))
                    break
                }

                buf.Write(encodeCopyOperation(aOffset, maxCopySize))
                rl -= maxCopySize
                aOffset += maxCopySize
            }

            i += l - 1
        }
    }

    encodeInsertOperation(ibuf, buf)
    bytes := buf.Bytes()

    bufPool.Put(buf)
    bufPool.Put(ibuf)

    return bytes
}

func encodeInsertOperation(ibuf, buf *bytes.Buffer) {
    if ibuf.Len() == 0 {
        return
    }

    b := ibuf.Bytes()
    s := ibuf.Len()
    o := 0
    for {
        if s <= 127 {
            break
        }
        buf.WriteByte(byte(127))
        buf.Write(b[o : o+127])
        s -= 127
        o += 127
    }
    buf.WriteByte(byte(s))
    buf.Write(b[o : o+s])

    ibuf.Reset()
}

func deltaEncodeSize(size int) []byte {
    var ret []byte
    c := size & 0x7f
    size >>= 7
    for {
        if size == 0 {
            break
        }

        ret = append(ret, byte(c|0x80))
        c = size & 0x7f
        size >>= 7
    }
    ret = append(ret, byte(c))

    return ret
}

func encodeCopyOperation(offset, length int) []byte {
    code := 0x80
    var opcodes []byte

    var i uint
    for i = 0; i < 4; i++ {
        f := 0xff << (i * 8)
        if offset&f != 0 {
            opcodes = append(opcodes, byte(offset&f>>(i*8)))
            code |= 0x01 << i
        }
    }

    for i = 0; i < 3; i++ {
        f := 0xff << (i * 8)
        if length&f != 0 {
            opcodes = append(opcodes, byte(length&f>>(i*8)))
            code |= 0x10 << i
        }
    }

    return append([]byte{byte(code)}, opcodes...)
}
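Reviewer note: a round-trip sketch for the exported DiffDelta (not part of this commit). It assumes PatchDelta from the package's patch_delta.go, which is not in this hunk but is vendored alongside these files.

package main

import (
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
    src := []byte("the quick brown fox jumps over the lazy dog\n")
    tgt := []byte("the quick brown fox jumps over the lazy cat\n")

    // The delta starts with both sizes in the LEB128-style encoding from
    // deltaEncodeSize, followed by copy/insert opcodes.
    delta := packfile.DiffDelta(src, tgt)
    fmt.Printf("delta is %d bytes for a %d byte target\n", len(delta), len(tgt))

    // Applying the delta to src must reproduce tgt exactly.
    out, err := packfile.PatchDelta(src, delta)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out) == string(tgt)) // true
}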
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go (generated, vendored, new file, 39 lines)
@@ -0,0 +1,39 @@
// Package packfile implements encoding and decoding of packfile format.
//
//  == pack-*.pack files have the following format:
//
//   - A header appears at the beginning and consists of the following:
//
//     4-byte signature:
//         The signature is: {'P', 'A', 'C', 'K'}
//
//     4-byte version number (network byte order):
//         GIT currently accepts version number 2 or 3 but
//         generates version 2 only.
//
//     4-byte number of objects contained in the pack (network byte order)
//
//     Observation: we cannot have more than 4G versions ;-) and
//     more than 4G objects in a pack.
//
//   - The header is followed by number of object entries, each of
//     which looks like this:
//
//     (undeltified representation)
//     n-byte type and length (3-bit type, (n-1)*7+4-bit length)
//     compressed data
//
//     (deltified representation)
//     n-byte type and length (3-bit type, (n-1)*7+4-bit length)
//     20-byte base object name
//     compressed delta data
//
//     Observation: length of each object is encoded in a variable
//     length format and is not constrained to 32-bit or anything.
//
//   - The trailer records 20-byte SHA1 checksum of all of the above.
//
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt
package packfile
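Reviewer note: a self-contained sketch (not part of this commit) decoding the 12-byte header described in the doc comment above; the bytes are hand-built, not read from a real pack.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

func main() {
    // "PACK", version 2, 3 objects; both integers in network byte order.
    hdr := []byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 3}

    if !bytes.Equal(hdr[:4], []byte("PACK")) {
        panic("bad signature")
    }
    version := binary.BigEndian.Uint32(hdr[4:8])
    objects := binary.BigEndian.Uint32(hdr[8:12])
    fmt.Println(version, objects) // 2 3
}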
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go (generated, vendored, new file, 219 lines)
@@ -0,0 +1,219 @@
package packfile

import (
    "compress/zlib"
    "crypto/sha1"
    "fmt"
    "io"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/storer"
    "gopkg.in/src-d/go-git.v4/utils/binary"
)

// Encoder gets the data from the storage and writes it into the writer in PACK
// format
type Encoder struct {
    selector *deltaSelector
    w        *offsetWriter
    zw       *zlib.Writer
    hasher   plumbing.Hasher

    useRefDeltas bool
}

// NewEncoder creates a new packfile encoder using a specific Writer and
// EncodedObjectStorer. By default, deltas used to generate the packfile will be
// OFSDeltaObject. To use reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
    h := plumbing.Hasher{
        Hash: sha1.New(),
    }
    mw := io.MultiWriter(w, h)
    ow := newOffsetWriter(mw)
    zw := zlib.NewWriter(mw)
    return &Encoder{
        selector:     newDeltaSelector(s),
        w:            ow,
        zw:           zw,
        hasher:       h,
        useRefDeltas: useRefDeltas,
    }
}

// Encode creates a packfile containing all the objects referenced in
// hashes and writes it to the writer in the Encoder. `packWindow`
// specifies the size of the sliding window used to compare objects
// for delta compression; 0 turns off delta compression entirely.
func (e *Encoder) Encode(
    hashes []plumbing.Hash,
    packWindow uint,
) (plumbing.Hash, error) {
    objects, err := e.selector.ObjectsToPack(hashes, packWindow)
    if err != nil {
        return plumbing.ZeroHash, err
    }

    return e.encode(objects)
}

func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) {
    if err := e.head(len(objects)); err != nil {
        return plumbing.ZeroHash, err
    }

    for _, o := range objects {
        if err := e.entry(o); err != nil {
            return plumbing.ZeroHash, err
        }
    }

    return e.footer()
}

func (e *Encoder) head(numEntries int) error {
    return binary.Write(
        e.w,
        signature,
        int32(VersionSupported),
        int32(numEntries),
    )
}

func (e *Encoder) entry(o *ObjectToPack) error {
    if o.WantWrite() {
        // A cycle exists in this delta chain. This should only occur if a
        // selected object representation disappeared during writing
        // (for example due to a concurrent repack) and a different base
        // was chosen, forcing a cycle. Select something other than a
        // delta, and write this object.
        e.selector.restoreOriginal(o)
        o.BackToOriginal()
    }

    if o.IsWritten() {
        return nil
    }

    o.MarkWantWrite()

    if err := e.writeBaseIfDelta(o); err != nil {
        return err
    }

    // We need to check if we already wrote that object due to a cyclic delta
    // chain
    if o.IsWritten() {
        return nil
    }

    o.Offset = e.w.Offset()

    if o.IsDelta() {
        if err := e.writeDeltaHeader(o); err != nil {
            return err
        }
    } else {
        if err := e.entryHead(o.Type(), o.Size()); err != nil {
            return err
        }
    }

    e.zw.Reset(e.w)
    or, err := o.Object.Reader()
    if err != nil {
        return err
    }

    _, err = io.Copy(e.zw, or)
    if err != nil {
        return err
    }

    return e.zw.Close()
}

func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error {
    if o.IsDelta() && !o.Base.IsWritten() {
        // We must write the base first
        return e.entry(o.Base)
    }

    return nil
}

func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error {
    // Write offset deltas by default
    t := plumbing.OFSDeltaObject
    if e.useRefDeltas {
        t = plumbing.REFDeltaObject
    }

    if err := e.entryHead(t, o.Object.Size()); err != nil {
        return err
    }

    if e.useRefDeltas {
        return e.writeRefDeltaHeader(o.Base.Hash())
    } else {
        return e.writeOfsDeltaHeader(o)
    }
}

func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error {
    return binary.Write(e.w, base)
}

func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error {
    // for OFS_DELTA, the offset of the base is interpreted as a negative
    // offset relative to the type-byte of the header of the ofs-delta entry.
    relativeOffset := o.Offset - o.Base.Offset
    if relativeOffset <= 0 {
        return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
    }

    return binary.WriteVariableWidthInt(e.w, relativeOffset)
}

func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error {
    t := int64(typeNum)
    header := []byte{}
    c := (t << firstLengthBits) | (size & maskFirstLength)
    size >>= firstLengthBits
    for {
        if size == 0 {
            break
        }
        header = append(header, byte(c|maskContinue))
        c = size & int64(maskLength)
        size >>= lengthBits
    }

    header = append(header, byte(c))
    _, err := e.w.Write(header)

    return err
}

func (e *Encoder) footer() (plumbing.Hash, error) {
    h := e.hasher.Sum()
    return h, binary.Write(e.w, h)
}

type offsetWriter struct {
    w      io.Writer
    offset int64
}

func newOffsetWriter(w io.Writer) *offsetWriter {
    return &offsetWriter{w: w}
}

func (ow *offsetWriter) Write(p []byte) (n int, err error) {
    n, err = ow.w.Write(p)
    ow.offset += int64(n)
    return n, err
}

func (ow *offsetWriter) Offset() int64 {
    return ow.offset
}
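Reviewer note: a minimal end-to-end sketch for the Encoder (not part of this commit), assuming the in-memory storer from gopkg.in/src-d/go-git.v4/storage/memory.

package main

import (
    "bytes"
    "fmt"

    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
    "gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
    storage := memory.NewStorage()

    // Store one blob so there is something to pack.
    obj := storage.NewEncodedObject()
    obj.SetType(plumbing.BlobObject)
    w, err := obj.Writer()
    if err != nil {
        panic(err)
    }
    w.Write([]byte("hello\n"))
    w.Close()
    h, err := storage.SetEncodedObject(obj)
    if err != nil {
        panic(err)
    }

    // useRefDeltas=false selects OFS deltas; packWindow=10 enables delta search.
    var buf bytes.Buffer
    enc := packfile.NewEncoder(&buf, storage, false)
    checksum, err := enc.Encode([]plumbing.Hash{h}, 10)
    if err != nil {
        panic(err)
    }
    fmt.Println(checksum, buf.Len()) // trailer hash and total pack size
}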
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go (generated, vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
package packfile

import "fmt"

// Error specifies errors returned during packfile parsing.
type Error struct {
    reason, details string
}

// NewError returns a new error.
func NewError(reason string) *Error {
    return &Error{reason: reason}
}

// Error returns a text representation of the error.
func (e *Error) Error() string {
    if e.details == "" {
        return e.reason
    }

    return fmt.Sprintf("%s: %s", e.reason, e.details)
}

// AddDetails adds details to an error, with additional text.
func (e *Error) AddDetails(format string, args ...interface{}) *Error {
    return &Error{
        reason:  e.reason,
        details: fmt.Sprintf(format, args...),
    }
}
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go (generated, vendored, new file, 116 lines)
@@ -0,0 +1,116 @@
package packfile

import (
    "io"

    billy "gopkg.in/src-d/go-billy.v4"
    "gopkg.in/src-d/go-git.v4/plumbing"
    "gopkg.in/src-d/go-git.v4/plumbing/cache"
    "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
    hash   plumbing.Hash
    h      *ObjectHeader
    offset int64
    size   int64
    typ    plumbing.ObjectType
    index  idxfile.Index
    fs     billy.Filesystem
    path   string
    cache  cache.Object
}

// NewFSObject creates a new filesystem object.
func NewFSObject(
    hash plumbing.Hash,
    finalType plumbing.ObjectType,
    offset int64,
    contentSize int64,
    index idxfile.Index,
    fs billy.Filesystem,
    path string,
    cache cache.Object,
) *FSObject {
    return &FSObject{
        hash:   hash,
        offset: offset,
        size:   contentSize,
        typ:    finalType,
        index:  index,
        fs:     fs,
        path:   path,
        cache:  cache,
    }
}

// Reader implements the plumbing.EncodedObject interface.
func (o *FSObject) Reader() (io.ReadCloser, error) {
    obj, ok := o.cache.Get(o.hash)
    if ok && obj != o {
        reader, err := obj.Reader()
        if err != nil {
            return nil, err
        }

        return reader, nil
    }

    f, err := o.fs.Open(o.path)
    if err != nil {
        return nil, err
    }

    p := NewPackfileWithCache(o.index, nil, f, o.cache)
    r, err := p.getObjectContent(o.offset)
    if err != nil {
        _ = f.Close()
        return nil, err
    }

    if err := f.Close(); err != nil {
        return nil, err
    }

    return r, nil
}

// SetSize implements the plumbing.EncodedObject interface. This method
// is a noop.
func (o *FSObject) SetSize(int64) {}

// SetType implements the plumbing.EncodedObject interface. This method is
// a noop.
func (o *FSObject) SetType(plumbing.ObjectType) {}

// Hash implements the plumbing.EncodedObject interface.
func (o *FSObject) Hash() plumbing.Hash { return o.hash }

// Size implements the plumbing.EncodedObject interface.
func (o *FSObject) Size() int64 { return o.size }

// Type implements the plumbing.EncodedObject interface.
func (o *FSObject) Type() plumbing.ObjectType {
    return o.typ
}

// Writer implements the plumbing.EncodedObject interface. This method always
// returns a nil writer.
func (o *FSObject) Writer() (io.WriteCloser, error) {
    return nil, nil
}

type objectReader struct {
    io.ReadCloser
    f billy.File
}

func (r *objectReader) Close() error {
    if err := r.ReadCloser.Close(); err != nil {
        _ = r.f.Close()
        return err
    }

    return r.f.Close()
}
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go (generated, vendored, new file, 164 lines)
@@ -0,0 +1,164 @@
package packfile

import (
    "gopkg.in/src-d/go-git.v4/plumbing"
)

// ObjectToPack is a representation of an object that is going to be written
// into a pack file.
type ObjectToPack struct {
    // The main object to pack, it could be any object, including deltas
    Object plumbing.EncodedObject
    // Base is the object that a delta is based on (it could also be another delta).
    // If the main object is not a delta, Base will be nil
    Base *ObjectToPack
    // Original is the object that we can generate applying the delta to
    // Base, or the same object as Object in the case of a non-delta
    // object.
    Original plumbing.EncodedObject
    // Depth is the number of deltas needed to resolve to obtain Original
    // (delta based on delta based on ...)
    Depth int

    // offset in pack when the object has already been written, or 0 if it
    // has not been written yet
    Offset int64

    // Information from the original object
    resolvedOriginal bool
    originalType     plumbing.ObjectType
    originalSize     int64
    originalHash     plumbing.Hash
}

// newObjectToPack creates a correct ObjectToPack based on a non-delta object
func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack {
    return &ObjectToPack{
        Object:   o,
        Original: o,
    }
}

// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on
// its base (which could be another delta), the delta target (in this case called
// original), and the delta Object itself
func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack {
    return &ObjectToPack{
        Object:   delta,
        Base:     base,
        Original: original,
        Depth:    base.Depth + 1,
    }
}

// BackToOriginal converts this ObjectToPack to a non-deltified object if it was one
func (o *ObjectToPack) BackToOriginal() {
    if o.IsDelta() && o.Original != nil {
        o.Object = o.Original
        o.Base = nil
        o.Depth = 0
    }
}

// IsWritten reports whether this ObjectToPack was
// already written into the packfile
func (o *ObjectToPack) IsWritten() bool {
    return o.Offset > 1
}

// MarkWantWrite marks this ObjectToPack as WantWrite
// to avoid delta chain loops
func (o *ObjectToPack) MarkWantWrite() {
    o.Offset = 1
}

// WantWrite checks if this ObjectToPack was marked as WantWrite before
func (o *ObjectToPack) WantWrite() bool {
    return o.Offset == 1
}

// SetOriginal sets Original and saves its size, type and hash. If the object
// is nil, Original is set but the previously resolved values are kept
func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) {
    o.Original = obj
    o.SaveOriginalMetadata()
}

// SaveOriginalMetadata saves the size, type and hash of the Original object
func (o *ObjectToPack) SaveOriginalMetadata() {
    if o.Original != nil {
        o.originalSize = o.Original.Size()
        o.originalType = o.Original.Type()
        o.originalHash = o.Original.Hash()
        o.resolvedOriginal = true
    }
}

// CleanOriginal sets Original to nil
func (o *ObjectToPack) CleanOriginal() {
    o.Original = nil
}

func (o *ObjectToPack) Type() plumbing.ObjectType {
    if o.Original != nil {
        return o.Original.Type()
    }

    if o.resolvedOriginal {
        return o.originalType
    }

    if o.Base != nil {
        return o.Base.Type()
    }

    if o.Object != nil {
        return o.Object.Type()
    }

    panic("cannot get type")
}

func (o *ObjectToPack) Hash() plumbing.Hash {
    if o.Original != nil {
        return o.Original.Hash()
    }

    if o.resolvedOriginal {
        return o.originalHash
    }

    do, ok := o.Object.(plumbing.DeltaObject)
    if ok {
        return do.ActualHash()
    }

    panic("cannot get hash")
}

func (o *ObjectToPack) Size() int64 {
    if o.Original != nil {
        return o.Original.Size()
    }

    if o.resolvedOriginal {
        return o.originalSize
    }

    do, ok := o.Object.(plumbing.DeltaObject)
    if ok {
        return do.ActualSize()
    }

    panic("cannot get ObjectToPack size")
}

func (o *ObjectToPack) IsDelta() bool {
    return o.Base != nil
}

func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) {
    o.Object = delta
    o.Base = base
    o.Depth = base.Depth + 1
}
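Reviewer note: IsWritten/MarkWantWrite/WantWrite above overload Offset as a three-state flag. This is sound because no real entry can start at offset 0 or 1: the 12-byte pack header always comes first. A hypothetical stand-in (not part of this commit) that makes the state machine explicit:

package main

import "fmt"

// objState is a minimal stand-in for ObjectToPack's offset-as-state trick.
type objState struct{ Offset int64 }

func (o *objState) MarkWantWrite()  { o.Offset = 1 }
func (o *objState) WantWrite() bool { return o.Offset == 1 }
func (o *objState) IsWritten() bool { return o.Offset > 1 }

func main() {
    var o objState
    fmt.Println(o.WantWrite(), o.IsWritten()) // false false: untouched
    o.MarkWantWrite()
    fmt.Println(o.WantWrite(), o.IsWritten()) // true false: cycle guard armed
    o.Offset = 12                             // header written at pack offset 12
    fmt.Println(o.WantWrite(), o.IsWritten()) // false true
}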
487
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go
generated
vendored
Normal file
487
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go
generated
vendored
Normal file
@ -0,0 +1,487 @@
|
||||
package packfile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
billy "gopkg.in/src-d/go-billy.v4"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/cache"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
|
||||
"gopkg.in/src-d/go-git.v4/plumbing/storer"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrInvalidObject is returned by Decode when an invalid object is
|
||||
// found in the packfile.
|
||||
ErrInvalidObject = NewError("invalid git object")
|
||||
// ErrZLib is returned by Decode when there was an error unzipping
|
||||
// the packfile contents.
|
||||
ErrZLib = NewError("zlib reading error")
|
||||
)
|
||||
|
||||
// When reading small objects from packfile it is beneficial to do so at
|
||||
// once to exploit the buffered I/O. In many cases the objects are so small
|
||||
// that they were already loaded to memory when the object header was
|
||||
// loaded from the packfile. Wrapping in FSObject would cause this buffered
|
||||
// data to be thrown away and then re-read later, with the additional
|
||||
// seeking causing reloads from disk. Objects smaller than this threshold
|
||||
// are now always read into memory and stored in cache instead of being
|
||||
// wrapped in FSObject.
|
||||
const smallObjectThreshold = 16 * 1024
|
||||
|
||||
// Packfile allows retrieving information from inside a packfile.
|
||||
type Packfile struct {
|
||||
idxfile.Index
|
||||
fs billy.Filesystem
|
||||
file billy.File
|
||||
s *Scanner
|
||||
deltaBaseCache cache.Object
|
||||
offsetToType map[int64]plumbing.ObjectType
|
||||
}
|
||||
|
||||
// NewPackfileWithCache creates a new Packfile with the given object cache.
|
||||
// If the filesystem is provided, the packfile will return FSObjects, otherwise
|
||||
// it will return MemoryObjects.
|
||||
func NewPackfileWithCache(
|
||||
index idxfile.Index,
|
||||
fs billy.Filesystem,
|
||||
file billy.File,
|
||||
cache cache.Object,
|
||||
) *Packfile {
|
||||
s := NewScanner(file)
|
||||
return &Packfile{
|
||||
index,
|
||||
fs,
|
||||
file,
|
||||
s,
|
||||
cache,
|
||||
make(map[int64]plumbing.ObjectType),
|
||||
}
|
||||
}
|
||||
|
||||
// NewPackfile returns a packfile representation for the given packfile file
|
||||
// and packfile idx.
|
||||
// If the filesystem is provided, the packfile will return FSObjects, otherwise
|
||||
// it will return MemoryObjects.
|
||||
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
|
||||
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
|
||||
}
|
||||
|
||||
// Get retrieves the encoded object in the packfile with the given hash.
|
||||
func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
|
||||
offset, err := p.FindOffset(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.GetByOffset(offset)
|
||||
}
|
||||
|
||||
// GetByOffset retrieves the encoded object from the packfile with the given
|
||||
// offset.
|
||||
func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
|
||||
hash, err := p.FindHash(o)
|
||||
if err == nil {
|
||||
if obj, ok := p.deltaBaseCache.Get(hash); ok {
|
||||
return obj, nil
|
||||
}
|
||||
}
|
||||
|
||||
return p.objectAtOffset(o)
|
||||
}
|
||||
|
||||
// GetSizeByOffset retrieves the size of the encoded object from the
|
||||
// packfile with the given offset.
|
||||
func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
|
||||
if _, err := p.s.SeekFromStart(o); err != nil {
|
||||
if err == io.EOF || isInvalid(err) {
|
||||
return 0, plumbing.ErrObjectNotFound
|
||||
}
|
||||
|
||||
return 0, err
|
||||
}
|
||||
|
||||
h, err := p.nextObjectHeader()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p.getObjectSize(h)
|
||||
}
|
||||
|
||||
func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
|
||||
h, err := p.s.SeekObjectHeader(offset)
|
||||
p.s.pendingObject = nil
|
||||
return h, err
|
||||
}
|
||||
|
||||
func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
|
||||
h, err := p.s.NextObjectHeader()
|
||||
p.s.pendingObject = nil
|
||||
return h, err
|
||||
}
|
||||
|
||||
func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
|
||||
switch h.Type {
|
||||
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
|
||||
return h.Length, nil
|
||||
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
defer bufPool.Put(buf)
|
||||
|
||||
if _, _, err := p.s.NextObject(buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
delta := buf.Bytes()
|
||||
_, delta = decodeLEB128(delta) // skip src size
|
||||
sz, _ := decodeLEB128(delta)
|
||||
return int64(sz), nil
|
||||
default:
|
||||
return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
|
||||
switch h.Type {
|
||||
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
|
||||
return h.Type, nil
|
||||
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
|
||||
var offset int64
|
||||
if h.Type == plumbing.REFDeltaObject {
|
||||
offset, err = p.FindOffset(h.Reference)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
offset = h.OffsetReference
|
||||
}
|
||||
|
||||
if baseType, ok := p.offsetToType[offset]; ok {
|
||||
typ = baseType
|
||||
} else {
|
||||
h, err = p.objectHeaderAtOffset(offset)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
typ, err = p.getObjectType(h)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
default:
|
||||
err = ErrInvalidObject.AddDetails("type %q", h.Type)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
|
||||
h, err := p.objectHeaderAtOffset(offset)
|
||||
if err != nil {
|
||||
if err == io.EOF || isInvalid(err) {
|
||||
return nil, plumbing.ErrObjectNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If we have no filesystem, we will return a MemoryObject instead
|
||||
// of an FSObject.
|
||||
if p.fs == nil {
|
||||
return p.getNextObject(h)
|
||||
}
|
||||
|
||||
// If the object is not a delta and it's small enough then read it
|
||||
// completely into memory now since it is already read from disk
|
||||
// into buffer anyway.
|
||||
if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
|
||||
return p.getNextObject(h)
|
||||
}
|
||||
|
||||
hash, err := p.FindHash(h.Offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size, err := p.getObjectSize(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
typ, err := p.getObjectType(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.offsetToType[h.Offset] = typ
|
||||
|
||||
return NewFSObject(
|
||||
hash,
|
||||
typ,
|
||||
h.Offset,
|
||||
size,
|
||||
p.Index,
|
||||
p.fs,
|
||||
p.file.Name(),
|
||||
p.deltaBaseCache,
|
||||
), nil
|
||||
}
|
||||
|
||||
func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
|
||||
ref, err := p.FindHash(offset)
|
||||
if err == nil {
|
||||
obj, ok := p.cacheGet(ref)
|
||||
if ok {
|
||||
reader, err := obj.Reader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reader, nil
|
||||
}
|
||||
}
|
||||
|
||||
h, err := p.objectHeaderAtOffset(offset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj, err := p.getNextObject(h)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return obj.Reader()
|
||||
}
|
||||
|
||||
func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
obj.SetType(h.Type)

var err error
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
err = p.fillRegularObjectContent(obj)
case plumbing.REFDeltaObject:
err = p.fillREFDeltaObjectContent(obj, h.Reference)
case plumbing.OFSDeltaObject:
err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
default:
err = ErrInvalidObject.AddDetails("type %q", h.Type)
}

if err != nil {
return nil, err
}

return obj, nil
}

func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
w, err := obj.Writer()
if err != nil {
return err
}

_, _, err = p.s.NextObject(w)
p.cachePut(obj)

return err
}

func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}

base, ok := p.cacheGet(ref)
if !ok {
base, err = p.Get(ref)
if err != nil {
return err
}
}

obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)
bufPool.Put(buf)

return err
}

func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bytes.NewBuffer(nil)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
}

var base plumbing.EncodedObject
var ok bool
hash, err := p.FindHash(offset)
if err == nil {
base, ok = p.cacheGet(hash)
}

if !ok {
base, err = p.GetByOffset(offset)
if err != nil {
return err
}
}

obj.SetType(base.Type())
err = ApplyDelta(obj, base, buf.Bytes())
p.cachePut(obj)

return err
}

func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
if p.deltaBaseCache == nil {
return nil, false
}

return p.deltaBaseCache.Get(h)
}

func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
if p.deltaBaseCache == nil {
return
}

p.deltaBaseCache.Put(obj)
}

// GetAll returns an iterator with all encoded objects in the packfile.
// The iterator returned is not thread-safe; it should be used in the same
// thread as the Packfile instance.
func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
return p.GetByType(plumbing.AnyObject)
}

// GetByType returns all the objects of the given type.
func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
switch typ {
case plumbing.AnyObject,
plumbing.BlobObject,
plumbing.TreeObject,
plumbing.CommitObject,
plumbing.TagObject:
entries, err := p.EntriesByOffset()
if err != nil {
return nil, err
}

return &objectIter{
// Easiest way to provide an object decoder is just to pass a Packfile
// instance. To not mess with the seeks, it's a new instance with a
// different scanner but the same cache and offset to hash map for
// reusing as much cache as possible.
p: p,
iter: entries,
typ: typ,
}, nil
default:
return nil, plumbing.ErrInvalidType
}
}

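// Usage sketch (illustrative, not part of the original file): assuming an
// already-opened *Packfile p, iterate every blob it contains.
//
//	iter, err := p.GetByType(plumbing.BlobObject)
//	if err != nil {
//		return err
//	}
//	defer iter.Close()
//	err = iter.ForEach(func(obj plumbing.EncodedObject) error {
//		fmt.Println(obj.Hash(), obj.Size())
//		return nil
//	})
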
// ID returns the ID of the packfile, which is the checksum at the end of it.
func (p *Packfile) ID() (plumbing.Hash, error) {
prev, err := p.file.Seek(-20, io.SeekEnd)
if err != nil {
return plumbing.ZeroHash, err
}

var hash plumbing.Hash
if _, err := io.ReadFull(p.file, hash[:]); err != nil {
return plumbing.ZeroHash, err
}

if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
return plumbing.ZeroHash, err
}

return hash, nil
}

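// The 20 bytes read above are the packfile trailer: a SHA-1 checksum
// computed over everything that precedes it, which is why ID seeks to -20
// from the end and restores the previous position afterwards.
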
// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
if !ok {
return nil
}

return closer.Close()
}

type objectIter struct {
p *Packfile
typ plumbing.ObjectType
iter idxfile.EntryIter
}

func (i *objectIter) Next() (plumbing.EncodedObject, error) {
for {
e, err := i.iter.Next()
if err != nil {
return nil, err
}

obj, err := i.p.GetByOffset(int64(e.Offset))
if err != nil {
return nil, err
}

if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
return obj, nil
}
}
}

func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
for {
o, err := i.Next()
if err != nil {
if err == io.EOF {
return nil
}
return err
}

if err := f(o); err != nil {
return err
}
}
}

func (i *objectIter) Close() {
i.iter.Close()
}

// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
// error inside. It also checks for the Windows error, which is different from
// os.ErrInvalid.
func isInvalid(err error) bool {
pe, ok := err.(*os.PathError)
if !ok {
return false
}

errstr := pe.Err.Error()
return errstr == errInvalidUnix || errstr == errInvalidWindows
}

// errInvalidWindows is the Windows equivalent to os.ErrInvalid
const errInvalidWindows = "The parameter is incorrect."

var errInvalidUnix = os.ErrInvalid.Error()

483
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go
generated
vendored
Normal file
@ -0,0 +1,483 @@
package packfile

import (
"bytes"
"errors"
"io"

"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
)

var (
// ErrReferenceDeltaNotFound is returned when the reference delta is not
// found.
ErrReferenceDeltaNotFound = errors.New("reference delta not found")

// ErrNotSeekableSource is returned when the source for the parser is not
// seekable and a storage was not provided, so it can't be parsed.
ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")

// ErrDeltaNotCached is returned when the delta could not be found in cache.
ErrDeltaNotCached = errors.New("delta could not be found in cache")
)

// Observer interface is implemented by index encoders.
type Observer interface {
// OnHeader is called when a new packfile is opened.
OnHeader(count uint32) error
// OnInflatedObjectHeader is called for each object header read.
OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
// OnInflatedObjectContent is called for each decoded object.
OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
// OnFooter is called when decoding is done.
OnFooter(h plumbing.Hash) error
}

// Parser decodes a packfile and calls any observer associated to it. It is
// used to generate indexes.
type Parser struct {
storage storer.EncodedObjectStorer
scanner *Scanner
count uint32
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash

cache *cache.BufferLRU
// delta content by offset, only used if source is not seekable
deltas map[int64][]byte

ob []Observer
}

// NewParser creates a new Parser. The Scanner source must be seekable.
// If it's not, NewParserWithStorage should be used instead.
func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
return NewParserWithStorage(scanner, nil, ob...)
}

// NewParserWithStorage creates a new Parser. The scanner source must either
// be seekable or a storage must be provided.
func NewParserWithStorage(
scanner *Scanner,
storage storer.EncodedObjectStorer,
ob ...Observer,
) (*Parser, error) {
if !scanner.IsSeekable && storage == nil {
return nil, ErrNotSeekableSource
}

var deltas map[int64][]byte
if !scanner.IsSeekable {
deltas = make(map[int64][]byte)
}

return &Parser{
storage: storage,
scanner: scanner,
ob: ob,
count: 0,
cache: cache.NewBufferLRUDefault(),
deltas: deltas,
}, nil
}

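// Usage sketch (illustrative, not part of the original file; it assumes the
// go-git idxfile package, whose Writer implements Observer): build a pack
// index while parsing a pack read from f.
//
//	w := new(idxfile.Writer)
//	parser, err := packfile.NewParser(packfile.NewScanner(f), w)
//	if err != nil {
//		return err
//	}
//	checksum, err := parser.Parse()
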
func (p *Parser) forEachObserver(f func(o Observer) error) error {
for _, o := range p.ob {
if err := f(o); err != nil {
return err
}
}
return nil
}

func (p *Parser) onHeader(count uint32) error {
return p.forEachObserver(func(o Observer) error {
return o.OnHeader(count)
})
}

func (p *Parser) onInflatedObjectHeader(
t plumbing.ObjectType,
objSize int64,
pos int64,
) error {
return p.forEachObserver(func(o Observer) error {
return o.OnInflatedObjectHeader(t, objSize, pos)
})
}

func (p *Parser) onInflatedObjectContent(
h plumbing.Hash,
pos int64,
crc uint32,
content []byte,
) error {
return p.forEachObserver(func(o Observer) error {
return o.OnInflatedObjectContent(h, pos, crc, content)
})
}

func (p *Parser) onFooter(h plumbing.Hash) error {
return p.forEachObserver(func(o Observer) error {
return o.OnFooter(h)
})
}

// Parse starts the decoding phase of the packfile.
func (p *Parser) Parse() (plumbing.Hash, error) {
if err := p.init(); err != nil {
return plumbing.ZeroHash, err
}

if err := p.indexObjects(); err != nil {
return plumbing.ZeroHash, err
}

var err error
p.checksum, err = p.scanner.Checksum()
if err != nil && err != io.EOF {
return plumbing.ZeroHash, err
}

if err := p.resolveDeltas(); err != nil {
return plumbing.ZeroHash, err
}

if err := p.onFooter(p.checksum); err != nil {
return plumbing.ZeroHash, err
}

return p.checksum, nil
}

func (p *Parser) init() error {
_, c, err := p.scanner.Header()
if err != nil {
return err
}

if err := p.onHeader(c); err != nil {
return err
}

p.count = c
p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
p.oiByOffset = make(map[int64]*objectInfo, p.count)
p.oi = make([]*objectInfo, p.count)

return nil
}

func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)

for i := uint32(0); i < p.count; i++ {
buf.Reset()

oh, err := p.scanner.NextObjectHeader()
if err != nil {
return err
}

delta := false
var ota *objectInfo
switch t := oh.Type; t {
case plumbing.OFSDeltaObject:
delta = true

parent, ok := p.oiByOffset[oh.OffsetReference]
if !ok {
return plumbing.ErrObjectNotFound
}

ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
parent.Children = append(parent.Children, ota)
case plumbing.REFDeltaObject:
delta = true
parent, ok := p.oiByHash[oh.Reference]
if !ok {
// can't find referenced object in this pack file
// this must be a "thin" pack.
parent = &objectInfo{ // placeholder parent
SHA1: oh.Reference,
ExternalRef: true, // mark as an external reference that must be resolved
Type: plumbing.AnyObject,
DiskType: plumbing.AnyObject,
}
p.oiByHash[oh.Reference] = parent
}
ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
parent.Children = append(parent.Children, ota)

default:
ota = newBaseObject(oh.Offset, oh.Length, t)
}

_, crc, err := p.scanner.NextObject(buf)
if err != nil {
return err
}

ota.Crc32 = crc
ota.Length = oh.Length

data := buf.Bytes()
if !delta {
sha1, err := getSHA1(ota.Type, data)
if err != nil {
return err
}

ota.SHA1 = sha1
p.oiByHash[ota.SHA1] = ota
}

if p.storage != nil && !delta {
obj := new(plumbing.MemoryObject)
obj.SetSize(oh.Length)
obj.SetType(oh.Type)
if _, err := obj.Write(data); err != nil {
return err
}

if _, err := p.storage.SetEncodedObject(obj); err != nil {
return err
}
}

if delta && !p.scanner.IsSeekable {
p.deltas[oh.Offset] = make([]byte, len(data))
copy(p.deltas[oh.Offset], data)
}

p.oiByOffset[oh.Offset] = ota
p.oi[i] = ota
}

return nil
}

func (p *Parser) resolveDeltas() error {
for _, obj := range p.oi {
content, err := p.get(obj)
if err != nil {
return err
}

if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
return err
}

if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
return err
}

if !obj.IsDelta() && len(obj.Children) > 0 {
for _, child := range obj.Children {
if _, err := p.resolveObject(child, content); err != nil {
return err
}
}

// Remove the delta from the cache.
if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
delete(p.deltas, obj.Offset)
}
}
}

return nil
}

func (p *Parser) get(o *objectInfo) (b []byte, err error) {
var ok bool
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok = p.cache.Get(o.Offset)
}

// If it's not in the cache and is not a delta, we can try to find it in
// the storage, if there's one. External refs must enter here.
if !ok && p.storage != nil && !o.Type.IsDelta() {
e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
if err != nil {
return nil, err
}
o.Type = e.Type()

r, err := e.Reader()
if err != nil {
return nil, err
}

b = make([]byte, e.Size())
if _, err = r.Read(b); err != nil {
return nil, err
}
}

if b != nil {
return b, nil
}

if o.ExternalRef {
// we were not able to resolve a ref in a thin pack
return nil, ErrReferenceDeltaNotFound
}

var data []byte
if o.DiskType.IsDelta() {
base, err := p.get(o.Parent)
if err != nil {
return nil, err
}

data, err = p.resolveObject(o, base)
if err != nil {
return nil, err
}
} else {
data, err = p.readData(o)
if err != nil {
return nil, err
}
}

if len(o.Children) > 0 {
p.cache.Put(o.Offset, data)
}

return data, nil
}

func (p *Parser) resolveObject(
o *objectInfo,
base []byte,
) ([]byte, error) {
if !o.DiskType.IsDelta() {
return nil, nil
}

data, err := p.readData(o)
if err != nil {
return nil, err
}

data, err = applyPatchBase(o, data, base)
if err != nil {
return nil, err
}

if p.storage != nil {
obj := new(plumbing.MemoryObject)
obj.SetSize(o.Size())
obj.SetType(o.Type)
if _, err := obj.Write(data); err != nil {
return nil, err
}

if _, err := p.storage.SetEncodedObject(obj); err != nil {
return nil, err
}
}

return data, nil
}

func (p *Parser) readData(o *objectInfo) ([]byte, error) {
if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
data, ok := p.deltas[o.Offset]
if !ok {
return nil, ErrDeltaNotCached
}

return data, nil
}

if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
return nil, err
}

buf := new(bytes.Buffer)
if _, _, err := p.scanner.NextObject(buf); err != nil {
return nil, err
}

return buf.Bytes(), nil
}

func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
patched, err := PatchDelta(base, data)
if err != nil {
return nil, err
}

if ota.SHA1 == plumbing.ZeroHash {
ota.Type = ota.Parent.Type
sha1, err := getSHA1(ota.Type, patched)
if err != nil {
return nil, err
}

ota.SHA1 = sha1
ota.Length = int64(len(patched))
}

return patched, nil
}

func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
hasher := plumbing.NewHasher(t, int64(len(data)))
if _, err := hasher.Write(data); err != nil {
return plumbing.ZeroHash, err
}

return hasher.Sum(), nil
}

type objectInfo struct {
Offset int64
Length int64
Type plumbing.ObjectType
DiskType plumbing.ObjectType
ExternalRef bool // indicates this is an external reference in a thin pack file

Crc32 uint32

Parent *objectInfo
Children []*objectInfo
SHA1 plumbing.Hash
}

func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
return newDeltaObject(offset, length, t, nil)
}

func newDeltaObject(
offset, length int64,
t plumbing.ObjectType,
parent *objectInfo,
) *objectInfo {
obj := &objectInfo{
Offset: offset,
Length: length,
Type: t,
DiskType: t,
Crc32: 0,
Parent: parent,
}

return obj
}

func (o *objectInfo) IsDelta() bool {
return o.Type.IsDelta()
}

func (o *objectInfo) Size() int64 {
return o.Length
}

229
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
package packfile

import (
"errors"
"io/ioutil"

"gopkg.in/src-d/go-git.v4/plumbing"
)

// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h,
// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c,
// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js
// for details about the delta format.

const deltaSizeMin = 4

// ApplyDelta writes to target the result of applying the modification deltas in delta to base.
func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error {
r, err := base.Reader()
if err != nil {
return err
}

w, err := target.Writer()
if err != nil {
return err
}

src, err := ioutil.ReadAll(r)
if err != nil {
return err
}

dst, err := PatchDelta(src, delta)
if err != nil {
return err
}

target.SetSize(int64(len(dst)))

_, err = w.Write(dst)
return err
}

var (
ErrInvalidDelta = errors.New("invalid delta")
ErrDeltaCmd = errors.New("wrong delta command")
)

// PatchDelta returns the result of applying the modification deltas in delta to src.
// An error will be returned if the delta is corrupted (ErrInvalidDelta) or if an
// action command is neither copy-from-source nor copy-from-delta (ErrDeltaCmd).
func PatchDelta(src, delta []byte) ([]byte, error) {
if len(delta) < deltaSizeMin {
return nil, ErrInvalidDelta
}

srcSz, delta := decodeLEB128(delta)
if srcSz != uint(len(src)) {
return nil, ErrInvalidDelta
}

targetSz, delta := decodeLEB128(delta)
remainingTargetSz := targetSz

var cmd byte
dest := make([]byte, 0, targetSz)
for {
if len(delta) == 0 {
return nil, ErrInvalidDelta
}

cmd = delta[0]
delta = delta[1:]
if isCopyFromSrc(cmd) {
var offset, sz uint
var err error
offset, delta, err = decodeOffset(cmd, delta)
if err != nil {
return nil, err
}

sz, delta, err = decodeSize(cmd, delta)
if err != nil {
return nil, err
}

if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
break
}
dest = append(dest, src[offset:offset+sz]...)
remainingTargetSz -= sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
return nil, ErrInvalidDelta
}

if uint(len(delta)) < sz {
return nil, ErrInvalidDelta
}

dest = append(dest, delta[0:sz]...)
remainingTargetSz -= sz
delta = delta[sz:]
} else {
return nil, ErrDeltaCmd
}

if remainingTargetSz <= 0 {
break
}
}

return dest, nil
}

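// Worked example (illustrative, not part of the original file): with
// src = []byte("hello world") (11 bytes), the delta
//
//	[]byte{0x0B, 0x08, 0x91, 0x00, 0x06, 0x02, 'G', 'o'}
//
// decodes as: source size 11, target size 8, then command 0x91
// (copy-from-source with one offset byte 0x00 and one size byte 0x06,
// copying "hello "), then command 0x02 (copy-from-delta, inserting the
// next two bytes "Go"), so PatchDelta returns "hello Go".
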
// Decodes a number encoded as an unsigned LEB128 at the start of some
// binary data and returns the decoded number and the rest of the
// stream.
//
// This must be called twice on the delta data buffer, first to get the
// expected source buffer size, and again to get the target buffer size.
func decodeLEB128(input []byte) (uint, []byte) {
var num, sz uint
var b byte
for {
b = input[sz]
num |= (uint(b) & payload) << (sz * 7) // concatenates 7-bit chunks
sz++

if uint(b)&continuation == 0 || sz == uint(len(input)) {
break
}
}

return num, input[sz:]
}

const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
)

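// For example, decodeLEB128([]byte{0xE5, 0x8E, 0x26}) yields 624485:
// 0x65 + (0x0E << 7) + (0x26 << 14) = 101 + 1792 + 622592.
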
func isCopyFromSrc(cmd byte) bool {
return (cmd & 0x80) != 0
}

func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
}

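// In a copy-from-source command the MSB is set and the low seven bits say
// which offset and size bytes follow: bits 0-3 select up to four offset
// bytes and bits 4-6 up to three size bytes, least significant first. For
// instance, cmd 0x91 (1001 0001) is followed by one offset byte and one
// size byte, decoded below.
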
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x02) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x04) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 16
delta = delta[1:]
}
if (cmd & 0x08) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
offset |= uint(delta[0]) << 24
delta = delta[1:]
}

return offset, delta, nil
}

func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz = uint(delta[0])
delta = delta[1:]
}
if (cmd & 0x20) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 8
delta = delta[1:]
}
if (cmd & 0x40) != 0 {
if len(delta) == 0 {
return 0, nil, ErrInvalidDelta
}
sz |= uint(delta[0]) << 16
delta = delta[1:]
}
if sz == 0 {
sz = 0x10000
}

return sz, delta, nil
}

func invalidSize(sz, targetSz uint) bool {
return sz > targetSz
}

func invalidOffsetSize(offset, sz, srcSz uint) bool {
return sumOverflows(offset, sz) ||
offset+sz > srcSz
}

func sumOverflows(a, b uint) bool {
return a+b < a
}

487
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go
generated
vendored
Normal file
@ -0,0 +1,487 @@
package packfile

import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"

"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/binary"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)

var (
// ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile.
ErrEmptyPackfile = NewError("empty packfile")
// ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect.
ErrBadSignature = NewError("malformed pack file signature")
// ErrUnsupportedVersion is returned by ReadHeader when the packfile version is
// different than VersionSupported.
ErrUnsupportedVersion = NewError("unsupported packfile version")
// ErrSeekNotSupported is returned if seek is not supported.
ErrSeekNotSupported = NewError("seek not supported")
)

// ObjectHeader contains the information related to the object; this
// information is collected from the bytes preceding the object's content.
type ObjectHeader struct {
Type plumbing.ObjectType
Offset int64
Length int64
Reference plumbing.Hash
OffsetReference int64
}

type Scanner struct {
r reader
zr readerResetter
crc hash.Hash32

// pendingObject is used to detect if an object has been read, or still
// is waiting to be read
pendingObject *ObjectHeader
version, objects uint32

// IsSeekable says if this scanner can do Seek or not; to have a seekable
// Scanner, an r implementing io.Seeker is required.
IsSeekable bool
}

// NewScanner returns a new Scanner based on a reader; if the given reader
// implements io.ReadSeeker the Scanner will also be Seekable.
func NewScanner(r io.Reader) *Scanner {
seeker, ok := r.(io.ReadSeeker)
if !ok {
seeker = &trackableReader{Reader: r}
}

crc := crc32.NewIEEE()
return &Scanner{
r: newTeeReader(newByteReadSeeker(seeker), crc),
crc: crc,
IsSeekable: ok,
}
}

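// Usage sketch (illustrative, not part of the original file): walk every
// object entry in a pack stream f, discarding the inflated content
// (ioutil.Discard here is io/ioutil's, aliased stdioutil in this file).
//
//	s := packfile.NewScanner(f)
//	_, count, err := s.Header()
//	if err != nil {
//		return err
//	}
//	for i := uint32(0); i < count; i++ {
//		h, err := s.NextObjectHeader()
//		if err != nil {
//			return err
//		}
//		n, crc, err := s.NextObject(ioutil.Discard)
//		if err != nil {
//			return err
//		}
//		fmt.Println(h.Type, h.Offset, n, crc)
//	}
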
// Header reads the whole packfile header (signature, version and object count).
// It returns the version and the object count and performs checks on the
// validity of the signature and the version fields.
func (s *Scanner) Header() (version, objects uint32, err error) {
if s.version != 0 {
return s.version, s.objects, nil
}

sig, err := s.readSignature()
if err != nil {
if err == io.EOF {
err = ErrEmptyPackfile
}

return
}

if !s.isValidSignature(sig) {
err = ErrBadSignature
return
}

version, err = s.readVersion()
s.version = version
if err != nil {
return
}

if !s.isSupportedVersion(version) {
err = ErrUnsupportedVersion.AddDetails("%d", version)
return
}

objects, err = s.readCount()
s.objects = objects
return
}

// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
return []byte{}, err
}

return sig, nil
}

// isValidSignature returns if sig is a valid packfile signature.
func (s *Scanner) isValidSignature(sig []byte) bool {
return bytes.Equal(sig, signature)
}

// readVersion reads and returns the version field of a packfile.
func (s *Scanner) readVersion() (uint32, error) {
return binary.ReadUint32(s.r)
}

// isSupportedVersion returns whether version v is supported by the parser.
// The current supported version is VersionSupported, defined above.
func (s *Scanner) isSupportedVersion(v uint32) bool {
return v == VersionSupported
}

// readCount reads and returns the count of objects field of a packfile.
func (s *Scanner) readCount() (uint32, error) {
return binary.ReadUint32(s.r)
}

// SeekObjectHeader seeks to the specified offset and returns the ObjectHeader
// for the next object in the reader.
func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}

if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
return nil, err
}

h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}

h.Offset = offset
return h, nil
}

// NextObjectHeader returns the ObjectHeader for the next object in the reader.
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
if err := s.doPending(); err != nil {
return nil, err
}

offset, err := s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}

h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}

h.Offset = offset
return h, nil
}

// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field.
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
defer s.Flush()

s.crc.Reset()

h := &ObjectHeader{}
s.pendingObject = h

var err error
h.Offset, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}

h.Type, h.Length, err = s.readObjectTypeAndLength()
if err != nil {
return nil, err
}

switch h.Type {
case plumbing.OFSDeltaObject:
no, err := binary.ReadVariableWidthInt(s.r)
if err != nil {
return nil, err
}

h.OffsetReference = h.Offset - no
case plumbing.REFDeltaObject:
var err error
h.Reference, err = binary.ReadHash(s.r)
if err != nil {
return nil, err
}
}

return h, nil
}

func (s *Scanner) doPending() error {
if s.version == 0 {
var err error
s.version, s.objects, err = s.Header()
if err != nil {
return err
}
}

return s.discardObjectIfNeeded()
}

func (s *Scanner) discardObjectIfNeeded() error {
if s.pendingObject == nil {
return nil
}

h := s.pendingObject
n, _, err := s.NextObject(stdioutil.Discard)
if err != nil {
return err
}

if n != h.Length {
return fmt.Errorf(
"error discarding object, discarded %d, expected %d",
n, h.Length,
)
}

return nil
}

// readObjectTypeAndLength reads and returns the object type and the
// length field from an object entry in a packfile.
func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
t, c, err := s.readType()
if err != nil {
return t, 0, err
}

l, err := s.readLength(c)

return t, l, err
}

func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
var c byte
var err error
if c, err = s.r.ReadByte(); err != nil {
return plumbing.ObjectType(0), 0, err
}

typ := parseType(c)

return typ, c, nil
}

func parseType(b byte) plumbing.ObjectType {
return plumbing.ObjectType((b & maskType) >> firstLengthBits)
}

// the length is codified in the last 4 bits of the first byte and in
// the last 7 bits of subsequent bytes. The last byte has a 0 MSB.
func (s *Scanner) readLength(first byte) (int64, error) {
length := int64(first & maskFirstLength)

c := first
shift := firstLengthBits
var err error
for c&maskContinue > 0 {
if c, err = s.r.ReadByte(); err != nil {
return 0, err
}

length += int64(c&maskLength) << shift
shift += lengthBits
}

return length, nil
}

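// For example, the header bytes 0x95 0x0A decode as: type bits
// (0x95 & 0x70) >> 4 = 1 (a commit), low nibble 0x05, continuation bit set,
// next byte 0x0A contributing 10 << 4 = 160, so the object length is 165.
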
// NextObject writes the content of the next object into the given writer and
// returns the number of bytes written, the CRC32 of the content and an error,
// if any.
func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
defer s.crc.Reset()

s.pendingObject = nil
written, err = s.copyObject(w)
s.Flush()
crc32 = s.crc.Sum32()
return
}

// copyObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
if s.zr == nil {
var zr io.ReadCloser
zr, err = zlib.NewReader(s.r)
if err != nil {
return 0, fmt.Errorf("zlib initialization error: %s", err)
}

s.zr = zr.(readerResetter)
} else {
if err = s.zr.Reset(s.r, nil); err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
}

defer ioutil.CheckClose(s.zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, s.zr, buf)
byteSlicePool.Put(buf)
return
}

var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}

// SeekFromStart sets a new offset from start; it returns the old position
// before the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}

previous, err = s.r.Seek(0, io.SeekCurrent)
if err != nil {
return -1, err
}

_, err = s.r.Seek(offset, io.SeekStart)
return previous, err
}

// Checksum returns the checksum of the packfile.
func (s *Scanner) Checksum() (plumbing.Hash, error) {
err := s.discardObjectIfNeeded()
if err != nil {
return plumbing.ZeroHash, err
}

return binary.ReadHash(s.r)
}

// Close reads the reader until io.EOF.
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
return err
}

// Flush finishes writing the buffer to the crc hasher in case we are using
// a teeReader. Otherwise it is a no-op.
func (s *Scanner) Flush() error {
tee, ok := s.r.(*teeReader)
if ok {
return tee.Flush()
}
return nil
}

type trackableReader struct {
count int64
io.Reader
}

// Read reads up to len(p) bytes into p.
func (r *trackableReader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.count += int64(n)

return
}

// Seek only supports io.SeekCurrent; any other operation fails.
func (r *trackableReader) Seek(offset int64, whence int) (int64, error) {
if whence != io.SeekCurrent {
return -1, ErrSeekNotSupported
}

return r.count, nil
}

func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker {
return &bufferedSeeker{
r: r,
Reader: *bufio.NewReader(r),
}
}

type bufferedSeeker struct {
r io.ReadSeeker
bufio.Reader
}

func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekCurrent && offset == 0 {
current, err := r.r.Seek(offset, whence)
if err != nil {
return current, err
}

return current - int64(r.Buffered()), nil
}

defer r.Reader.Reset(r.r)
return r.r.Seek(offset, whence)
}

type readerResetter interface {
io.ReadCloser
zlib.Resetter
}

type reader interface {
io.Reader
io.ByteReader
io.Seeker
}

type teeReader struct {
reader
w hash.Hash32
bufWriter *bufio.Writer
}

func newTeeReader(r reader, h hash.Hash32) *teeReader {
return &teeReader{
reader: r,
w: h,
bufWriter: bufio.NewWriter(h),
}
}

func (r *teeReader) Read(p []byte) (n int, err error) {
r.Flush()

n, err = r.reader.Read(p)
if n > 0 {
if n, err := r.w.Write(p[:n]); err != nil {
return n, err
}
}
return
}

func (r *teeReader) ReadByte() (b byte, err error) {
b, err = r.reader.ReadByte()
if err == nil {
return b, r.bufWriter.WriteByte(b)
}

return
}

func (r *teeReader) Flush() (err error) {
return r.bufWriter.Flush()
}