feat(dir2config): defaults

This commit is contained in:
Mikaël Cluseau
2019-02-28 19:27:09 +11:00
parent d2b212ae6b
commit ea6fce68e1
383 changed files with 74236 additions and 41 deletions

View File

@ -0,0 +1,61 @@
package filesystem
import (
stdioutil "io/ioutil"
"os"
"gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// ConfigStorage reads and writes the repository configuration stored in the
// .git directory (the "config" file).
type ConfigStorage struct {
	dir *dotgit.DotGit
}

// Config loads the repository configuration. If the config file does not
// exist, a default (empty) configuration is returned with a nil error.
func (c *ConfigStorage) Config() (conf *config.Config, err error) {
	cfg := config.NewConfig()

	f, err := c.dir.Config()
	if err != nil {
		if os.IsNotExist(err) {
			// No config file yet: not an error, return the defaults.
			return cfg, nil
		}

		return nil, err
	}

	// The named err lets the deferred CheckClose surface Close failures.
	defer ioutil.CheckClose(f, &err)

	b, err := stdioutil.ReadAll(f)
	if err != nil {
		return nil, err
	}

	if err = cfg.Unmarshal(b); err != nil {
		return nil, err
	}

	return cfg, err
}
// SetConfig validates cfg and writes it to the .git/config file, replacing
// the previous contents.
func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) {
	if err = cfg.Validate(); err != nil {
		return err
	}

	f, err := c.dir.ConfigWriter()
	if err != nil {
		return err
	}

	// The named err lets the deferred CheckClose surface Close failures.
	defer ioutil.CheckClose(f, &err)

	b, err := cfg.Marshal()
	if err != nil {
		return err
	}

	_, err = f.Write(b)
	return err
}

View File

@ -0,0 +1,37 @@
package filesystem
import (
"gopkg.in/src-d/go-git.v4/plumbing"
)
// deltaObject decorates an EncodedObject with the metadata of the delta it
// came from: the hash of its base, its own hash, and its actual (expanded)
// size.
type deltaObject struct {
	plumbing.EncodedObject
	base plumbing.Hash // hash of the object this delta is based on
	hash plumbing.Hash // hash of the object itself
	size int64         // size of the object once the delta is applied
}

// newDeltaObject wraps obj as a plumbing.DeltaObject carrying the given
// hash, base hash and actual size.
func newDeltaObject(
	obj plumbing.EncodedObject,
	hash plumbing.Hash,
	base plumbing.Hash,
	size int64) plumbing.DeltaObject {
	return &deltaObject{
		EncodedObject: obj,
		hash:          hash,
		base:          base,
		size:          size,
	}
}

// BaseHash returns the hash of the object this delta is based on.
func (o *deltaObject) BaseHash() plumbing.Hash {
	return o.base
}

// ActualSize returns the size of the object once the delta is applied.
func (o *deltaObject) ActualSize() int64 {
	return o.size
}

// ActualHash returns the hash of the object once the delta is applied.
func (o *deltaObject) ActualHash() plumbing.Hash {
	return o.hash
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,81 @@
package dotgit
import (
"io"
"os"
"runtime"
"gopkg.in/src-d/go-billy.v4"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// openAndLockPackedRefsMode returns the open mode for the packed-refs file:
// read-write when the filesystem supports it, read-only otherwise.
func (d *DotGit) openAndLockPackedRefsMode() int {
	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
		return os.O_RDWR
	}

	return os.O_RDONLY
}

// rewritePackedRefsWhileLocked replaces the packed-refs file pr with the
// contents of tmp, picking a strategy suited to the current filesystem.
func (d *DotGit) rewritePackedRefsWhileLocked(
	tmp billy.File, pr billy.File) error {
	// Try plain rename. If we aren't using the bare Windows filesystem as the
	// storage layer, we might be able to get away with a rename over a locked
	// file.
	err := d.fs.Rename(tmp.Name(), pr.Name())
	if err == nil {
		return nil
	}

	// If we are in a filesystem that does not support rename (e.g. sivafs)
	// a full copy is done.
	if err == billy.ErrNotSupported {
		return d.copyNewFile(tmp, pr)
	}

	if runtime.GOOS != "windows" {
		return err
	}

	// Otherwise, Windows doesn't let us rename over a locked file, so
	// we have to do a straight copy. Unfortunately this could result
	// in a partially-written file if the process fails before the
	// copy completes.
	return d.copyToExistingFile(tmp, pr)
}
// copyToExistingFile truncates pr in place and copies the full contents of
// tmp into it (both files are rewound first).
func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
	if _, err := pr.Seek(0, io.SeekStart); err != nil {
		return err
	}

	if err := pr.Truncate(0); err != nil {
		return err
	}

	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return err
	}

	_, err := io.Copy(pr, tmp)
	return err
}
// copyNewFile copies the contents of tmp into a freshly created file at
// pr's path (used when the filesystem does not support rename).
func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
	prWrite, err := d.fs.Create(pr.Name())
	if err != nil {
		return err
	}

	// Close errors are propagated through the named err.
	defer ioutil.CheckClose(prWrite, &err)

	_, err = tmp.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}

	_, err = io.Copy(prWrite, tmp)
	return err
}

View File

@ -0,0 +1,90 @@
package dotgit
import (
"fmt"
"os"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
"gopkg.in/src-d/go-billy.v4"
)
// setRef dispatches to the locking read-write implementation when the
// filesystem supports it, and to the simple write-only fallback otherwise.
func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
	if !billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
		return d.setRefNorwfs(fileName, content, old)
	}

	return d.setRefRwfs(fileName, content, old)
}
// setRefRwfs updates a reference file using read-write access plus file
// locking, optionally verifying that the current value matches old first.
func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
	// If we are not checking an old ref, just truncate the file.
	mode := os.O_RDWR | os.O_CREATE
	if old == nil {
		mode |= os.O_TRUNC
	}

	f, err := d.fs.OpenFile(fileName, mode, 0666)
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	// Lock is unlocked by the deferred Close above. This is because Unlock
	// does not imply a fsync and thus there would be a race between
	// Unlock+Close and other concurrent writers. Adding Sync to go-billy
	// could work, but this is better (and avoids superfluous syncs).
	err = f.Lock()
	if err != nil {
		return err
	}

	// this is a no-op to call even when old is nil.
	err = d.checkReferenceAndTruncate(f, old)
	if err != nil {
		return err
	}

	_, err = f.Write([]byte(content))
	return err
}
// There are some filesystems that don't support opening files in read-write
// (os.O_RDWR) mode. In these filesystems the standard setRefRwfs can not be
// used as it reads the reference file to check that it's not modified before
// updating it.
//
// This version of the function writes the reference without extra checks
// making it compatible with these simple filesystems. This is usually not
// a problem as they should be accessed by only one process at a time.
func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) (err error) {
	_, err = d.fs.Stat(fileName)
	if err == nil && old != nil {
		// The file exists and the caller asked for compare-and-swap: verify
		// the on-disk reference still matches the expected old value.
		fRead, err := d.fs.Open(fileName)
		if err != nil {
			return err
		}

		ref, err := d.readReferenceFrom(fRead, old.Name().String())
		fRead.Close()
		if err != nil {
			return err
		}

		if ref.Hash() != old.Hash() {
			return fmt.Errorf("reference has changed concurrently")
		}
	}

	f, err := d.fs.Create(fileName)
	if err != nil {
		return err
	}

	// Propagate Close errors through the named err: a failed close can mean
	// the reference was not persisted (the original dropped this error with
	// a plain `defer f.Close()`).
	defer ioutil.CheckClose(f, &err)

	_, err = f.Write([]byte(content))
	return err
}

View File

@ -0,0 +1,284 @@
package dotgit
import (
"fmt"
"io"
"sync/atomic"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-billy.v4"
)
// PackWriter is an io.Writer that generates the packfile index simultaneously:
// a packfile parser reads the file being written through a synchronized
// reader, so parsing is coordinated with the write operations.
// The packfile is written in a temp file, when Close is called this file
// is renamed/moved (depends on the Filesystem implementation) to the final
// location, if the PackWriter is not used, nothing is written
type PackWriter struct {
	// Notify, if set, is invoked on Close with the packfile checksum and the
	// index writer once a complete packfile has been written.
	Notify func(plumbing.Hash, *idxfile.Writer)

	fs       billy.Filesystem
	fr, fw   billy.File    // read and write handles over the same temp file
	synced   *syncedReader // couples fw (writer) with fr (index-building reader)
	checksum plumbing.Hash // checksum of the parsed packfile
	parser   *packfile.Parser
	writer   *idxfile.Writer
	result   chan error // outcome of the background buildIndex goroutine
}
// newPackWrite creates a PackWriter backed by a temp file under objects/pack
// plus a read handle on the same file, and starts the background goroutine
// that builds the index while data is written.
func newPackWrite(fs billy.Filesystem) (*PackWriter, error) {
	fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_")
	if err != nil {
		return nil, err
	}

	fr, err := fs.Open(fw.Name())
	if err != nil {
		// Don't leak the temp file descriptor when the read handle cannot
		// be opened; the Open error is what gets reported.
		_ = fw.Close()
		return nil, err
	}

	writer := &PackWriter{
		fs:     fs,
		fw:     fw,
		fr:     fr,
		synced: newSyncedReader(fw, fr),
		result: make(chan error),
	}

	go writer.buildIndex()

	return writer, nil
}
// buildIndex runs in its own goroutine: it parses the packfile as it is
// being written (through the synced reader) and reports the outcome on
// w.result.
func (w *PackWriter) buildIndex() {
	s := packfile.NewScanner(w.synced)
	w.writer = new(idxfile.Writer)
	var err error
	w.parser, err = packfile.NewParser(s, w.writer)
	if err != nil {
		w.result <- err
		return
	}

	checksum, err := w.parser.Parse()
	if err != nil {
		w.result <- err
		return
	}

	w.checksum = checksum
	// err is nil here; send it to unblock waitBuildIndex.
	w.result <- err
}

// waitBuildIndex waits until buildIndex function finishes, this can terminate
// with a packfile.ErrEmptyPackfile, this means that nothing was written so we
// ignore the error
func (w *PackWriter) waitBuildIndex() error {
	err := <-w.result
	if err == packfile.ErrEmptyPackfile {
		return nil
	}

	return err
}
// Write feeds packfile data to the temp file through the synced reader,
// waking the index-building goroutine as data becomes available.
func (w *PackWriter) Write(p []byte) (int, error) {
	return w.synced.Write(p)
}

// Close closes all the file descriptors and save the final packfile, if nothing
// was written, the tempfiles are deleted without writing a packfile.
func (w *PackWriter) Close() error {
	defer func() {
		// Notify subscribers only when a complete packfile was indexed.
		if w.Notify != nil && w.writer != nil && w.writer.Finished() {
			w.Notify(w.checksum, w.writer)
		}

		close(w.result)
	}()

	// Order matters: closing the synced reader unblocks buildIndex, which
	// must finish before the underlying files are closed.
	if err := w.synced.Close(); err != nil {
		return err
	}

	if err := w.waitBuildIndex(); err != nil {
		return err
	}

	if err := w.fr.Close(); err != nil {
		return err
	}

	if err := w.fw.Close(); err != nil {
		return err
	}

	// Empty or unfinished packfile: remove the temp file instead of saving.
	if w.writer == nil || !w.writer.Finished() {
		return w.clean()
	}

	return w.save()
}
// clean removes the temporary packfile.
func (w *PackWriter) clean() error {
	return w.fs.Remove(w.fw.Name())
}

// save writes the .idx file and renames the temp packfile to its final
// pack-<checksum>.pack location.
func (w *PackWriter) save() error {
	base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum))
	idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base))
	if err != nil {
		return err
	}

	if err := w.encodeIdx(idx); err != nil {
		return err
	}

	if err := idx.Close(); err != nil {
		return err
	}

	return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base))
}

// encodeIdx encodes the index built while writing into writer.
func (w *PackWriter) encodeIdx(writer io.Writer) error {
	idx, err := w.writer.Index()
	if err != nil {
		return err
	}

	e := idxfile.NewEncoder(writer)
	_, err = e.Encode(idx)
	return err
}
// syncedReader couples a writer and a reader over the same underlying file:
// reads block until the writer has produced enough data, so a consumer can
// parse the file while it is still being written.
type syncedReader struct {
	w io.Writer
	r io.ReadSeeker

	blocked, done uint32    // atomic flags: reader parked / writer closed
	written, read uint64    // atomic byte counters for writer and reader
	news          chan bool // wake-up signal for a blocked reader
}

// newSyncedReader returns a syncedReader over the given writer/reader pair.
func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader {
	return &syncedReader{
		w:    w,
		r:    r,
		news: make(chan bool),
	}
}

// Write writes to the underlying writer and wakes a blocked reader when new
// data is available beyond what has already been read.
func (s *syncedReader) Write(p []byte) (n int, err error) {
	defer func() {
		written := atomic.AddUint64(&s.written, uint64(n))
		read := atomic.LoadUint64(&s.read)
		if written > read {
			s.wake()
		}
	}()

	n, err = s.w.Write(p)
	return
}
// Read reads from the underlying reader, parking (via sleep) until the
// writer has produced data, and only surfacing EOF once the writer is done.
func (s *syncedReader) Read(p []byte) (n int, err error) {
	defer func() { atomic.AddUint64(&s.read, uint64(n)) }()

	for {
		s.sleep()
		n, err = s.r.Read(p)
		if err == io.EOF && !s.isDone() && n == 0 {
			// Premature EOF: the writer is still active, wait for more data.
			continue
		}

		break
	}

	return
}

// isDone reports whether the writer side has been closed.
func (s *syncedReader) isDone() bool {
	return atomic.LoadUint32(&s.done) == 1
}

// isBlocked reports whether the reader is parked waiting for data.
func (s *syncedReader) isBlocked() bool {
	return atomic.LoadUint32(&s.blocked) == 1
}

// wake unparks a blocked reader.
// NOTE(review): the check-then-send here is not atomic with sleep's
// check-then-receive; this appears to rely on a single writer and a single
// reader — confirm before reusing this type elsewhere.
func (s *syncedReader) wake() {
	if s.isBlocked() {
		atomic.StoreUint32(&s.blocked, 0)
		s.news <- true
	}
}

// sleep parks the reader until the writer signals new data, whenever the
// reader has caught up with everything written so far.
func (s *syncedReader) sleep() {
	read := atomic.LoadUint64(&s.read)
	written := atomic.LoadUint64(&s.written)
	if read >= written {
		atomic.StoreUint32(&s.blocked, 1)
		<-s.news
	}
}

// Seek delegates to the underlying reader; for non-relative seeks the read
// counter is resynchronized with the new position.
func (s *syncedReader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekCurrent {
		return s.r.Seek(offset, whence)
	}

	p, err := s.r.Seek(offset, whence)
	atomic.StoreUint64(&s.read, uint64(p))

	return p, err
}

// Close marks the writer as done and closes the wake-up channel, releasing
// any blocked reader.
func (s *syncedReader) Close() error {
	atomic.StoreUint32(&s.done, 1)
	close(s.news)
	return nil
}
// ObjectWriter writes a loose object to a temp file and, on Close, moves it
// to its hash-derived final location under .git/objects.
type ObjectWriter struct {
	objfile.Writer
	fs billy.Filesystem
	f  billy.File
}
// newObjectWriter creates an ObjectWriter backed by a temp file under
// objects/pack.
func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) {
	f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_")
	if err != nil {
		return nil, err
	}

	w := &ObjectWriter{
		Writer: *objfile.NewWriter(f),
		fs:     fs,
		f:      f,
	}

	return w, nil
}
// Close finishes the objfile writer and the temp file, then moves the file
// into place.
func (w *ObjectWriter) Close() error {
	if err := w.Writer.Close(); err != nil {
		return err
	}

	if err := w.f.Close(); err != nil {
		return err
	}

	return w.save()
}

// save renames the temp file to objects/<first 2 hash chars>/<remaining 38>.
func (w *ObjectWriter) save() error {
	hash := w.Hash().String()
	file := w.fs.Join(objectsPath, hash[0:2], hash[2:40])

	return w.fs.Rename(w.f.Name(), file)
}

View File

@ -0,0 +1,47 @@
package filesystem
import (
"os"
"gopkg.in/src-d/go-git.v4/plumbing/format/index"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// IndexStorage reads and writes the git index file stored in the .git
// directory.
type IndexStorage struct {
	dir *dotgit.DotGit
}

// SetIndex encodes idx into the .git/index file, replacing its contents.
func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
	f, err := s.dir.IndexWriter()
	if err != nil {
		return err
	}

	// Close errors are surfaced through the named err.
	defer ioutil.CheckClose(f, &err)

	e := index.NewEncoder(f)
	err = e.Encode(idx)
	return err
}
// Index reads and decodes the .git/index file. If it does not exist, an
// empty version-2 index is returned with a nil error.
func (s *IndexStorage) Index() (i *index.Index, err error) {
	idx := &index.Index{
		Version: 2,
	}

	f, err := s.dir.Index()
	if err != nil {
		if os.IsNotExist(err) {
			// Fresh repository: no index yet, return the empty one.
			return idx, nil
		}

		return nil, err
	}

	defer ioutil.CheckClose(f, &err)

	d := index.NewDecoder(f)
	err = d.Decode(idx)
	return idx, err
}

View File

@ -0,0 +1,20 @@
package filesystem
import (
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)
// ModuleStorage provides access to the storage of submodules kept under
// .git/modules.
type ModuleStorage struct {
	dir *dotgit.DotGit
}

// Module returns a Storer for the submodule with the given name, backed by
// its directory under .git/modules and a fresh default object cache.
func (s *ModuleStorage) Module(name string) (storage.Storer, error) {
	fs, err := s.dir.Module(name)
	if err != nil {
		return nil, err
	}

	return NewStorage(fs, cache.NewObjectLRUDefault()), nil
}

View File

@ -0,0 +1,748 @@
package filesystem
import (
"io"
"os"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
"gopkg.in/src-d/go-billy.v4"
)
// ObjectStorage reads and writes git objects, both loose and packed, from a
// .git directory.
type ObjectStorage struct {
	options Options

	// objectCache is an object cache uses to cache delta's bases and also recently
	// loaded loose objects
	objectCache cache.Object

	dir *dotgit.DotGit
	// index maps each packfile hash to its decoded idx; built lazily by
	// requireIndex.
	index map[plumbing.Hash]idxfile.Index
}

// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
	return NewObjectStorageWithOptions(dir, objectCache, Options{})
}

// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
	return &ObjectStorage{
		options:     ops,
		objectCache: objectCache,
		dir:         dir,
	}
}
// requireIndex lazily loads the idx file of every packfile into s.index.
// It is a no-op once the index map has been built.
func (s *ObjectStorage) requireIndex() error {
	if s.index != nil {
		return nil
	}

	s.index = make(map[plumbing.Hash]idxfile.Index)
	packs, err := s.dir.ObjectPacks()
	if err != nil {
		return err
	}

	for _, h := range packs {
		if err := s.loadIdxFile(h); err != nil {
			return err
		}
	}

	return nil
}

// Reindex indexes again all packfiles. Useful if git changed packfiles externally
func (s *ObjectStorage) Reindex() {
	// Dropping the map makes the next requireIndex call rebuild it.
	s.index = nil
}
// loadIdxFile decodes the idx file of packfile h into s.index.
func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
	f, err := s.dir.ObjectPackIdx(h)
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	idxf := idxfile.NewMemoryIndex()
	d := idxfile.NewDecoder(f)
	if err = d.Decode(idxf); err != nil {
		return err
	}

	s.index[h] = idxf
	return err
}

// NewEncodedObject returns a fresh in-memory object ready to be populated.
func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
	return &plumbing.MemoryObject{}
}
// PackfileWriter returns a writer that stores a packfile, with its index
// generated on the fly, directly into the .git directory.
func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	w, err := s.dir.NewObjectPack()
	if err != nil {
		return nil, err
	}

	// Register the freshly built index once the packfile has been written.
	w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
		index, err := writer.Index()
		if err == nil {
			s.index[h] = index
		}
	}

	return w, nil
}
// SetEncodedObject adds a new object to the storage.
// Delta objects cannot be stored loose; callers must expand them first.
func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
	if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
		return plumbing.ZeroHash, plumbing.ErrInvalidType
	}

	ow, err := s.dir.NewObject()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(ow, &err)

	or, err := o.Reader()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(or, &err)

	if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
		return plumbing.ZeroHash, err
	}

	if _, err = io.Copy(ow, or); err != nil {
		return plumbing.ZeroHash, err
	}

	return o.Hash(), err
}
// HasEncodedObject returns nil if the object exists, without actually
// reading the object data from storage.
func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
	// Check unpacked objects
	f, err := s.dir.Object(h)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Fall through to check packed objects.
	} else {
		defer ioutil.CheckClose(f, &err)
		return nil
	}

	// Check packed objects.
	if err := s.requireIndex(); err != nil {
		return err
	}
	_, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return plumbing.ErrObjectNotFound
	}
	return nil
}
// encodedObjectSizeFromUnpacked returns the size of the loose object h, or
// plumbing.ErrObjectNotFound when no loose object file exists.
func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
	size int64, err error) {
	f, err := s.dir.Object(h)
	if err != nil {
		if os.IsNotExist(err) {
			return 0, plumbing.ErrObjectNotFound
		}

		return 0, err
	}
	// The original leaked this descriptor: objfile.Reader.Close does not
	// close the reader it wraps, so f must be closed explicitly.
	defer ioutil.CheckClose(f, &err)

	r, err := objfile.NewReader(f)
	if err != nil {
		return 0, err
	}

	defer ioutil.CheckClose(r, &err)

	_, size, err = r.Header()
	return size, err
}
// encodedObjectSizeFromPackfile returns the (expanded) size of object h by
// locating it in a packfile, serving it from the object cache when possible.
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
	size int64, err error) {
	if err := s.requireIndex(); err != nil {
		return 0, err
	}

	pack, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return 0, plumbing.ErrObjectNotFound
	}

	f, err := s.dir.ObjectPack(pack)
	if err != nil {
		return 0, err
	}

	defer ioutil.CheckClose(f, &err)

	idx := s.index[pack]
	hash, err := idx.FindHash(offset)
	if err == nil {
		// Cache hit: no need to touch the packfile at all.
		obj, ok := s.objectCache.Get(hash)
		if ok {
			return obj.Size(), nil
		}
	} else if err != plumbing.ErrObjectNotFound {
		// err is non-nil in this branch by construction; the original's
		// extra `err != nil &&` check was redundant.
		return 0, err
	}

	var p *packfile.Packfile
	if s.objectCache != nil {
		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
	} else {
		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
	}

	return p.GetSizeByOffset(offset)
}
// EncodedObjectSize returns the plaintext size of the given object,
// without actually reading the full object data from storage.
// Loose storage is consulted first; packfiles only on ErrObjectNotFound.
func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
	size int64, err error) {
	size, err = s.encodedObjectSizeFromUnpacked(h)
	switch err {
	case nil:
		return size, nil
	case plumbing.ErrObjectNotFound:
		return s.encodedObjectSizeFromPackfile(h)
	default:
		return 0, err
	}
}
// EncodedObject returns the object with the given hash, by searching for it in
// the packfile and the git object directories.
func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	var obj plumbing.EncodedObject
	var err error

	// Probe packfiles first when their index is already loaded; otherwise
	// try loose objects first to avoid building the index needlessly.
	if s.index != nil {
		obj, err = s.getFromPackfile(h, false)
		if err == plumbing.ErrObjectNotFound {
			obj, err = s.getFromUnpacked(h)
		}
	} else {
		obj, err = s.getFromUnpacked(h)
		if err == plumbing.ErrObjectNotFound {
			obj, err = s.getFromPackfile(h, false)
		}
	}

	// If the error is still object not found, check if it's a shared object
	// repository.
	if err == plumbing.ErrObjectNotFound {
		dotgits, e := s.dir.Alternates()
		if e == nil {
			// Create a new object storage with the DotGit(s) and check for the
			// required hash object. Skip when not found.
			for _, dg := range dotgits {
				o := NewObjectStorage(dg, s.objectCache)
				enobj, enerr := o.EncodedObject(t, h)
				if enerr != nil {
					continue
				}
				return enobj, nil
			}
		}
	}

	if err != nil {
		return nil, err
	}

	if plumbing.AnyObject != t && obj.Type() != t {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}
// DeltaObject returns the object with the given hash, by searching for
// it in the packfile and the git object directories.
// Unlike EncodedObject, the packed result may still be a delta.
func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType,
	h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, err := s.getFromUnpacked(h)
	if err == plumbing.ErrObjectNotFound {
		obj, err = s.getFromPackfile(h, true)
	}

	if err != nil {
		return nil, err
	}

	if plumbing.AnyObject != t && obj.Type() != t {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}
// getFromUnpacked reads a loose object file and decodes it into a new
// EncodedObject, serving it from the object cache when possible.
func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
	f, err := s.dir.Object(h)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, plumbing.ErrObjectNotFound
		}

		return nil, err
	}

	defer ioutil.CheckClose(f, &err)

	// NOTE(review): the cache is consulted only after opening the file, so
	// even a cache hit pays for the open/close — consider checking first.
	if cacheObj, found := s.objectCache.Get(h); found {
		return cacheObj, nil
	}

	obj = s.NewEncodedObject()
	r, err := objfile.NewReader(f)
	if err != nil {
		return nil, err
	}

	defer ioutil.CheckClose(r, &err)

	t, size, err := r.Header()
	if err != nil {
		return nil, err
	}

	obj.SetType(t)
	obj.SetSize(size)
	w, err := obj.Writer()
	if err != nil {
		return nil, err
	}

	// NOTE(review): the object is cached before the copy completes; if Copy
	// fails a partially-filled object remains cached — verify intended.
	s.objectCache.Put(obj)

	_, err = io.Copy(w, r)
	return obj, err
}
// getFromPackfile returns the object with the given hash by locating it in
// one of the packfiles. When canBeDelta is true, the result may still be a
// delta object instead of the expanded object.
//
// The results are named so the deferred CheckClose can report packfile Close
// errors to the caller; in the original, CheckClose wrote into a local err
// that was never returned.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
	obj plumbing.EncodedObject, err error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	pack, hash, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return nil, plumbing.ErrObjectNotFound
	}

	f, err := s.dir.ObjectPack(pack)
	if err != nil {
		return nil, err
	}

	// With KeepDescriptors the handle is reused and closed later via
	// ObjectStorage.Close.
	if !s.options.KeepDescriptors {
		defer ioutil.CheckClose(f, &err)
	}

	idx := s.index[pack]
	if canBeDelta {
		return s.decodeDeltaObjectAt(f, idx, offset, hash)
	}

	return s.decodeObjectAt(f, idx, offset)
}
// decodeObjectAt returns the (expanded) object at the given packfile offset,
// serving it from the cache when its hash is known and cached.
func (s *ObjectStorage) decodeObjectAt(
	f billy.File,
	idx idxfile.Index,
	offset int64,
) (plumbing.EncodedObject, error) {
	hash, err := idx.FindHash(offset)
	if err == nil {
		obj, ok := s.objectCache.Get(hash)
		if ok {
			return obj, nil
		}
	}

	// ErrObjectNotFound from FindHash only means the offset has no known
	// hash; any other error is fatal.
	if err != nil && err != plumbing.ErrObjectNotFound {
		return nil, err
	}

	var p *packfile.Packfile
	if s.objectCache != nil {
		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
	} else {
		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
	}

	return p.GetByOffset(offset)
}
// decodeDeltaObjectAt returns the object at the given offset as a delta
// (when it is one) without expanding it, resolving the base hash of
// offset-deltas through the idx.
func (s *ObjectStorage) decodeDeltaObjectAt(
	f billy.File,
	idx idxfile.Index,
	offset int64,
	hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}

	p := packfile.NewScanner(f)
	header, err := p.SeekObjectHeader(offset)
	if err != nil {
		return nil, err
	}

	var (
		base plumbing.Hash
	)

	switch header.Type {
	case plumbing.REFDeltaObject:
		// The base hash is stored directly in the delta header.
		base = header.Reference
	case plumbing.OFSDeltaObject:
		// Resolve the base hash from its offset through the idx.
		base, err = idx.FindHash(header.OffsetReference)
		if err != nil {
			return nil, err
		}
	default:
		// Not a delta: fall back to plain decoding.
		return s.decodeObjectAt(f, idx, offset)
	}

	obj := &plumbing.MemoryObject{}
	obj.SetType(header.Type)
	w, err := obj.Writer()
	if err != nil {
		return nil, err
	}

	// Copy the raw (still deltified) payload into the object.
	if _, _, err := p.NextObject(w); err != nil {
		return nil, err
	}

	return newDeltaObject(obj, hash, base, header.Length), nil
}
// findObjectInPackfile scans the loaded pack indexes for h, returning the
// containing packfile's hash, the object hash, and its offset, or zero
// hashes and -1 when not found.
func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
	// Loop variable renamed so it no longer shadows the packfile package.
	for pack, idx := range s.index {
		if offset, err := idx.FindOffset(h); err == nil {
			return pack, h, offset
		}
	}

	return plumbing.ZeroHash, plumbing.ZeroHash, -1
}
// IterEncodedObjects returns an iterator for all the objects in the packfile
// with the given type.
// Loose objects are iterated first; their hashes are recorded in seen so
// packfile iteration does not yield duplicates.
func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
	objects, err := s.dir.Objects()
	if err != nil {
		return nil, err
	}

	seen := make(map[plumbing.Hash]struct{})
	var iters []storer.EncodedObjectIter
	if len(objects) != 0 {
		iters = append(iters, &objectsIter{s: s, t: t, h: objects})
		seen = hashListAsMap(objects)
	}

	packi, err := s.buildPackfileIters(t, seen)
	if err != nil {
		return nil, err
	}

	iters = append(iters, packi)
	return storer.NewMultiEncodedObjectIter(iters), nil
}
// buildPackfileIters returns a lazy iterator over the objects of type t in
// all packfiles, skipping the hashes in seen.
func (s *ObjectStorage) buildPackfileIters(
	t plumbing.ObjectType,
	seen map[plumbing.Hash]struct{},
) (storer.EncodedObjectIter, error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	packs, err := s.dir.ObjectPacks()
	if err != nil {
		return nil, err
	}
	// Each packfile is opened on demand, when the iterator reaches it.
	return &lazyPackfilesIter{
		hashes: packs,
		open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) {
			pack, err := s.dir.ObjectPack(h)
			if err != nil {
				return nil, err
			}
			return newPackfileIter(
				s.dir.Fs(), pack, t, seen, s.index[h],
				s.objectCache, s.options.KeepDescriptors,
			)
		},
	}, nil
}
// Close closes all opened files.
func (s *ObjectStorage) Close() error {
	return s.dir.Close()
}

// lazyPackfilesIter iterates the objects of several packfiles, opening each
// packfile's iterator only when the previous one is exhausted.
type lazyPackfilesIter struct {
	hashes []plumbing.Hash // packfiles still to visit
	open   func(h plumbing.Hash) (storer.EncodedObjectIter, error)
	cur    storer.EncodedObjectIter // iterator over the current packfile, if any
}

// Next returns the next object, moving to the next packfile when the
// current iterator reports io.EOF.
func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) {
	for {
		if it.cur == nil {
			if len(it.hashes) == 0 {
				return nil, io.EOF
			}
			h := it.hashes[0]
			it.hashes = it.hashes[1:]
			sub, err := it.open(h)
			if err == io.EOF {
				// Empty packfile: skip it.
				continue
			} else if err != nil {
				return nil, err
			}
			it.cur = sub
		}
		ob, err := it.cur.Next()
		if err == io.EOF {
			it.cur.Close()
			it.cur = nil
			continue
		} else if err != nil {
			return nil, err
		}
		return ob, nil
	}
}

// ForEach calls cb for every remaining object.
func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	return storer.ForEachIterator(it, cb)
}

// Close releases the current sub-iterator and drops the pending packfiles.
func (it *lazyPackfilesIter) Close() {
	if it.cur != nil {
		it.cur.Close()
		it.cur = nil
	}
	it.hashes = nil
}
// packfileIter iterates the objects of one packfile, skipping hashes already
// seen (e.g. available as loose objects).
type packfileIter struct {
	pack billy.File
	iter storer.EncodedObjectIter
	seen map[plumbing.Hash]struct{}

	// tells whether the pack file should be left open after iteration or not
	keepPack bool
}

// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
// and object type. Packfile and index file will be closed after they're
// used. If keepPack is true the packfile won't be closed after the iteration
// finished.
func NewPackfileIter(
	fs billy.Filesystem,
	f billy.File,
	idxFile billy.File,
	t plumbing.ObjectType,
	keepPack bool,
) (storer.EncodedObjectIter, error) {
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		return nil, err
	}

	if err := idxFile.Close(); err != nil {
		return nil, err
	}

	seen := make(map[plumbing.Hash]struct{})
	return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
}
// newPackfileIter builds an iterator over the objects of type t in packfile
// f, using the given index and optional cache, skipping hashes in seen.
func newPackfileIter(
	fs billy.Filesystem,
	f billy.File,
	t plumbing.ObjectType,
	seen map[plumbing.Hash]struct{},
	index idxfile.Index,
	cache cache.Object,
	keepPack bool,
) (storer.EncodedObjectIter, error) {
	var p *packfile.Packfile
	if cache != nil {
		p = packfile.NewPackfileWithCache(index, fs, f, cache)
	} else {
		p = packfile.NewPackfile(index, fs, f)
	}

	iter, err := p.GetByType(t)
	if err != nil {
		return nil, err
	}

	return &packfileIter{
		pack:     f,
		iter:     iter,
		seen:     seen,
		keepPack: keepPack,
	}, nil
}
// Next returns the next object in the packfile, skipping already-seen hashes.
func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
	for {
		obj, err := iter.iter.Next()
		if err != nil {
			return nil, err
		}

		if _, ok := iter.seen[obj.Hash()]; ok {
			continue
		}

		return obj, nil
	}
}

// ForEach calls cb for every remaining object, closing the iterator once the
// packfile is exhausted.
func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	for {
		o, err := iter.Next()
		if err != nil {
			if err == io.EOF {
				iter.Close()
				return nil
			}
			return err
		}

		if err := cb(o); err != nil {
			return err
		}
	}
}

// Close closes the inner iterator and, unless keepPack is set, the packfile.
func (iter *packfileIter) Close() {
	iter.iter.Close()
	if !iter.keepPack {
		_ = iter.pack.Close()
	}
}
// objectsIter iterates loose objects with the given hashes, filtered by
// object type.
type objectsIter struct {
	s *ObjectStorage
	t plumbing.ObjectType
	h []plumbing.Hash
}

// Next returns the next loose object of the requested type, skipping objects
// of other types, and io.EOF once the hash list is exhausted.
// The original recursed once per skipped object, growing the stack linearly
// with the number of mismatching objects; this version iterates instead.
func (iter *objectsIter) Next() (plumbing.EncodedObject, error) {
	for len(iter.h) > 0 {
		obj, err := iter.s.getFromUnpacked(iter.h[0])
		iter.h = iter.h[1:]

		if err != nil {
			return nil, err
		}

		if iter.t == plumbing.AnyObject || iter.t == obj.Type() {
			return obj, nil
		}
	}

	return nil, io.EOF
}
// ForEach calls cb for every remaining loose object of the requested type.
func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
	for {
		o, err := iter.Next()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}

		if err := cb(o); err != nil {
			return err
		}
	}
}

// Close drops the remaining hashes, ending the iteration.
func (iter *objectsIter) Close() {
	iter.h = []plumbing.Hash{}
}

// hashListAsMap turns a hash slice into a set for O(1) membership checks.
func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} {
	m := make(map[plumbing.Hash]struct{}, len(l))
	for _, h := range l {
		m[h] = struct{}{}
	}
	return m
}
// ForEachObjectHash calls fun for every loose object hash; returning
// storer.ErrStop from fun aborts the walk without reporting an error.
func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
	err := s.dir.ForEachObjectHash(fun)
	if err == storer.ErrStop {
		return nil
	}
	return err
}

// LooseObjectTime returns the modification time of the loose object file.
func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
	fi, err := s.dir.ObjectStat(hash)
	if err != nil {
		return time.Time{}, err
	}
	return fi.ModTime(), nil
}

// DeleteLooseObject removes the loose object file for hash.
func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error {
	return s.dir.ObjectDelete(hash)
}

// ObjectPacks lists the hashes of the available packfiles.
func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
	return s.dir.ObjectPacks()
}

// DeleteOldObjectPackAndIndex removes packfile h and its index if older
// than t.
func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error {
	return s.dir.DeleteOldObjectPackAndIndex(h, t)
}

View File

@ -0,0 +1,44 @@
package filesystem
import (
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
)
// ReferenceStorage reads and writes references (branches, tags, HEAD) in a
// .git directory.
type ReferenceStorage struct {
	dir *dotgit.DotGit
}

// SetReference writes ref unconditionally.
func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
	return r.dir.SetRef(ref, nil)
}

// CheckAndSetReference writes ref only if the stored reference still equals
// old (compare-and-swap).
func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
	return r.dir.SetRef(ref, old)
}

// Reference returns the reference named n.
func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
	return r.dir.Ref(n)
}

// IterReferences returns an iterator over all references.
func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
	refs, err := r.dir.Refs()
	if err != nil {
		return nil, err
	}

	return storer.NewReferenceSliceIter(refs), nil
}

// RemoveReference deletes the reference named n.
func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
	return r.dir.RemoveRef(n)
}

// CountLooseRefs returns the number of loose (non-packed) references.
func (r *ReferenceStorage) CountLooseRefs() (int, error) {
	return r.dir.CountLooseRefs()
}

// PackRefs moves loose references into the packed-refs file.
func (r *ReferenceStorage) PackRefs() error {
	return r.dir.PackRefs()
}

View File

@ -0,0 +1,54 @@
package filesystem
import (
"bufio"
"fmt"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// ShallowStorage is where the shallow commits are stored; an internal type
// to manipulate the .git/shallow file.
type ShallowStorage struct {
	dir *dotgit.DotGit
}
// SetShallow saves the shallows in the shallow file in the .git folder as one
// commit per line represented by 40-byte hexadecimal object terminated by a
// newline.
//
// The result is named so the deferred CheckClose can surface Close errors;
// in the original, CheckClose wrote into a local err that had already been
// copied to the (unnamed) return value, silently dropping the failure.
func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) (err error) {
	f, err := s.dir.ShallowWriter()
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	for _, h := range commits {
		if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
			return err
		}
	}

	return err
}
// Shallow returns the shallow commits read from the shallow file in .git.
// A missing file yields a nil slice and nil error.
//
// The results are named so the deferred CheckClose can surface Close errors;
// in the original, CheckClose wrote into a local err that was never part of
// the return value.
func (s *ShallowStorage) Shallow() (commits []plumbing.Hash, err error) {
	f, err := s.dir.Shallow()
	if f == nil || err != nil {
		return nil, err
	}

	defer ioutil.CheckClose(f, &err)

	// One 40-char hexadecimal hash per line.
	scn := bufio.NewScanner(f)
	for scn.Scan() {
		commits = append(commits, plumbing.NewHash(scn.Text()))
	}

	return commits, scn.Err()
}

View File

@ -0,0 +1,71 @@
// Package filesystem is a storage backend base on filesystems
package filesystem
import (
"gopkg.in/src-d/go-git.v4/plumbing/cache"
"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
"gopkg.in/src-d/go-billy.v4"
)
// Storage is an implementation of git.Storer that stores data on disk in the
// standard git format (this is, the .git directory). Zero values of this type
// are not safe to use, see the NewStorage function below.
type Storage struct {
	fs  billy.Filesystem
	dir *dotgit.DotGit

	// The embedded sub-storages all share the same DotGit handle.
	ObjectStorage
	ReferenceStorage
	IndexStorage
	ShallowStorage
	ConfigStorage
	ModuleStorage
}

// Options holds configuration for the storage.
type Options struct {
	// ExclusiveAccess means that the filesystem is not modified externally
	// while the repo is open.
	ExclusiveAccess bool
	// KeepDescriptors makes the file descriptors to be reused but they will
	// need to be manually closed calling Close().
	KeepDescriptors bool
}

// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
	return NewStorageWithOptions(fs, cache, Options{})
}

// NewStorageWithOptions returns a new Storage with extra options,
// backed by a given `fs.Filesystem` and cache.
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
	dirOps := dotgit.Options{
		ExclusiveAccess: ops.ExclusiveAccess,
		KeepDescriptors: ops.KeepDescriptors,
	}
	dir := dotgit.NewWithOptions(fs, dirOps)

	return &Storage{
		fs:  fs,
		dir: dir,

		ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
		ReferenceStorage: ReferenceStorage{dir: dir},
		IndexStorage:     IndexStorage{dir: dir},
		ShallowStorage:   ShallowStorage{dir: dir},
		ConfigStorage:    ConfigStorage{dir: dir},
		ModuleStorage:    ModuleStorage{dir: dir},
	}
}
// Filesystem returns the underlying filesystem the storage was created with.
func (s *Storage) Filesystem() billy.Filesystem {
	return s.fs
}

// Init initializes the .git directory structure on the filesystem.
func (s *Storage) Init() error {
	return s.dir.Initialize()
}