feat(dir2config): defaults

This commit is contained in:
Mikaël Cluseau
2019-02-28 19:27:09 +11:00
parent d2b212ae6b
commit ea6fce68e1
383 changed files with 74236 additions and 41 deletions

View File

@ -0,0 +1,203 @@
package packp
import (
"fmt"
"sort"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/plumbing/storer"
"gopkg.in/src-d/go-git.v4/storage/memory"
)
// AdvRefs values represent the information transmitted on an
// advertised-refs message. Values from this type are not zero-value
// safe, use the New function instead.
// AdvRefs values represent the information transmitted on an
// advertised-refs message. Values from this type are not zero-value
// safe, use the New function instead.
type AdvRefs struct {
	// Prefix stores prefix payloads.
	//
	// When using this message over (smart) HTTP, you have to add a pktline
	// before the whole thing with the following payload:
	//
	// '# service=$servicename' LF
	//
	// Moreover, some (all) git HTTP smart servers will send a flush-pkt
	// just after the first pkt-line.
	//
	// To accommodate both situations, the Prefix field allows you to store
	// any data you want to send before the actual pktlines. It will also
	// be filled up with whatever is found on the line.
	Prefix [][]byte
	// Head stores the resolved HEAD reference if present.
	// This can be present with git-upload-pack, not with git-receive-pack.
	Head *plumbing.Hash
	// Capabilities are the capabilities.
	Capabilities *capability.List
	// References are the hash references.
	References map[string]plumbing.Hash
	// Peeled are the peeled hash references.
	Peeled map[string]plumbing.Hash
	// Shallows are the shallow object ids.
	Shallows []plumbing.Hash
}
// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
func NewAdvRefs() *AdvRefs {
	a := &AdvRefs{
		Prefix:   [][]byte{},
		Shallows: []plumbing.Hash{},
	}
	a.Capabilities = capability.NewList()
	a.References = make(map[string]plumbing.Hash)
	a.Peeled = make(map[string]plumbing.Hash)

	return a
}
// AddReference adds the given reference to this message: symbolic
// references become symref capability values ("name:target"), while
// hash references are stored in the References map.
func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
	switch r.Type() {
	case plumbing.HashReference:
		a.References[r.Name().String()] = r.Hash()
		return nil
	case plumbing.SymbolicReference:
		pair := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
		a.Capabilities.Add(capability.SymRef, pair)
		return nil
	}

	return plumbing.ErrInvalidType
}
// AllReferences returns an in-memory reference storage populated with
// every reference advertised in this message.
func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
	refs := memory.ReferenceStorage{}
	err := a.addRefs(refs)
	if err == nil {
		return refs, nil
	}

	return refs, plumbing.NewUnexpectedError(err)
}
// addRefs copies every advertised hash reference into s, then resolves
// HEAD: through the symref capability when supported, by guessing
// otherwise.
func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
	for name, hash := range a.References {
		ref := plumbing.NewReferenceFromStrings(name, hash.String())
		if err := s.SetReference(ref); err != nil {
			return err
		}
	}

	if !a.supportSymrefs() {
		return a.resolveHead(s)
	}

	return a.addSymbolicRefs(s)
}
// If the server does not support the symref capability,
// we need to guess the reference that HEAD is pointing to.
//
// Git versions prior to 1.8.4.3 have a special procedure to find
// the reference that HEAD points to:
//   - Check if a reference called master exists. If it exists and it
//     has the same hash as HEAD, we can say that HEAD points to master.
//   - If master does not exist or does not have the same hash as HEAD,
//     order the references and check, in that order, whether each
//     reference has the same hash as HEAD. If yes, set HEAD pointing to
//     that branch hash.
//   - If no reference is found, return an error.
func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
	// Nothing to resolve when the server did not advertise a HEAD.
	if a.Head == nil {
		return nil
	}

	ref, err := s.Reference(plumbing.ReferenceName(plumbing.Master))

	// check first if HEAD is pointing to master
	if err == nil {
		ok, err := a.createHeadIfCorrectReference(ref, s)
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
	}

	// Any lookup error other than "not found" is fatal.
	if err != nil && err != plumbing.ErrReferenceNotFound {
		return err
	}

	// From here we are trying to guess the branch that HEAD is pointing to.
	refIter, err := s.IterReferences()
	if err != nil {
		return err
	}

	// Collect and sort the reference names so the guess is deterministic.
	var refNames []string
	err = refIter.ForEach(func(r *plumbing.Reference) error {
		refNames = append(refNames, string(r.Name()))
		return nil
	})
	if err != nil {
		return err
	}

	sort.Strings(refNames)

	var headSet bool
	for _, refName := range refNames {
		ref, err := s.Reference(plumbing.ReferenceName(refName))
		if err != nil {
			return err
		}
		ok, err := a.createHeadIfCorrectReference(ref, s)
		if err != nil {
			return err
		}
		if ok {
			headSet = true
			break
		}
	}

	if !headSet {
		return plumbing.ErrReferenceNotFound
	}

	return nil
}
// createHeadIfCorrectReference stores a symbolic HEAD reference pointing
// at the given reference when their hashes match. It reports whether
// HEAD was set.
func (a *AdvRefs) createHeadIfCorrectReference(
	reference *plumbing.Reference,
	s storer.ReferenceStorer) (bool, error) {
	if reference.Hash() != *a.Head {
		return false, nil
	}

	headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
	if err := s.SetReference(headRef); err != nil {
		return false, err
	}

	return true, nil
}
// addSymbolicRefs stores in s the symbolic references advertised through
// the symref capability, whose values are "name:target" pairs.
//
// It returns a plumbing.UnexpectedError when a symref value is malformed,
// or the storer's own error when a reference cannot be stored.
func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
	for _, symref := range a.Capabilities.Get(capability.SymRef) {
		chunks := strings.Split(symref, ":")
		if len(chunks) != 2 {
			err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
			return plumbing.NewUnexpectedError(err)
		}
		name := plumbing.ReferenceName(chunks[0])
		target := plumbing.ReferenceName(chunks[1])
		ref := plumbing.NewSymbolicReference(name, target)
		if err := s.SetReference(ref); err != nil {
			// Fix: this error was previously swallowed (`return nil`),
			// hiding storage failures from the caller.
			return err
		}
	}

	return nil
}
// supportSymrefs reports whether the advertised capabilities include symref.
func (a *AdvRefs) supportSymrefs() bool {
	return a.Capabilities.Supports(capability.SymRef)
}

View File

@ -0,0 +1,288 @@
package packp
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Decode reads the next advertised-refs message from its input and
// stores it in the AdvRefs.
func (a *AdvRefs) Decode(r io.Reader) error {
	return newAdvRefsDecoder(r).Decode(a)
}
// advRefsDecoder is a pkt-line driven state machine that parses an
// advertised-refs message into an AdvRefs value.
type advRefsDecoder struct {
	s     *pktline.Scanner // a pkt-line scanner from the input stream
	line  []byte           // current pkt-line contents, use nextLine() to make it advance
	nLine int              // current pkt-line number for debugging, begins at 1
	hash  plumbing.Hash    // last hash read
	err   error            // sticky error, use the error() method to fill this out
	data  *AdvRefs         // parsed data is stored here
}
var (
	// ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised
	// references message (i.e. a flush-pkt as the very first pkt-line).
	ErrEmptyAdvRefs = errors.New("empty advertised-ref message")
	// ErrEmptyInput is returned by Decode if the input is empty.
	ErrEmptyInput = errors.New("empty input")
)
// newAdvRefsDecoder returns a decoder that reads pkt-lines from r.
func newAdvRefsDecoder(r io.Reader) *advRefsDecoder {
	d := &advRefsDecoder{}
	d.s = pktline.NewScanner(r)

	return d
}
// Decode runs the decoder state machine over the input, filling v.
// It returns the sticky error set by the state that stopped, if any.
func (d *advRefsDecoder) Decode(v *AdvRefs) error {
	d.data = v

	state := decodePrefix
	for state != nil {
		state = state(d)
	}

	return d.err
}
// decoderStateFn is one state of the decoder state machine: it consumes
// the current line and returns the next state, or nil to stop.
type decoderStateFn func(*advRefsDecoder) decoderStateFn

// fills out the parser sticky error
func (d *advRefsDecoder) error(format string, a ...interface{}) {
	msg := fmt.Sprintf(
		"pkt-line %d: %s", d.nLine,
		fmt.Sprintf(format, a...),
	)

	d.err = NewErrUnexpectedData(msg, d.line)
}
// Reads a new pkt-line from the scanner, makes its payload available as
// p.line and increments p.nLine. A successful invocation returns true,
// otherwise, false is returned and the sticky error is filled out
// accordingly. Trims eols at the end of the payloads.
func (d *advRefsDecoder) nextLine() bool {
d.nLine++
if !d.s.Scan() {
if d.err = d.s.Err(); d.err != nil {
return false
}
if d.nLine == 1 {
d.err = ErrEmptyInput
return false
}
d.error("EOF")
return false
}
d.line = d.s.Bytes()
d.line = bytes.TrimSuffix(d.line, eol)
return true
}
// decodePrefix consumes the optional HTTP smart prefix line
// ("# service=..."), which is often followed by a flush-pkt, before
// handing off to the first hash line.
func decodePrefix(d *advRefsDecoder) decoderStateFn {
	if !d.nextLine() {
		return nil
	}

	if !isPrefix(d.line) {
		return decodeFirstHash
	}

	// Keep a private copy of the payload: the scanner may reuse its buffer.
	prefix := append([]byte(nil), d.line...)
	d.data.Prefix = append(d.data.Prefix, prefix)

	if !d.nextLine() {
		return nil
	}

	if !isFlush(d.line) {
		return decodeFirstHash
	}

	d.data.Prefix = append(d.data.Prefix, pktline.Flush)
	if !d.nextLine() {
		return nil
	}

	return decodeFirstHash
}
// isPrefix reports whether payload looks like an HTTP smart prefix
// line, i.e. it starts with '#'.
func isPrefix(payload []byte) bool {
	if len(payload) == 0 {
		return false
	}

	return payload[0] == '#'
}
// decodeFirstHash parses the leading hash. A zero hash announces a
// no-refs message; otherwise the hash is followed by the first
// advertised ref.
func decodeFirstHash(d *advRefsDecoder) decoderStateFn {
	// An empty repository advertises a flush-pkt here (HTTP).
	if isFlush(d.line) {
		d.err = ErrEmptyAdvRefs
		return nil
	}

	if len(d.line) < hashSize {
		d.error("cannot read hash, pkt-line too short")
		return nil
	}

	if _, err := hex.Decode(d.hash[:], d.line[:hashSize]); err != nil {
		d.error("invalid hash text: %s", err)
		return nil
	}

	d.line = d.line[hashSize:]

	if d.hash.IsZero() {
		return decodeSkipNoRefs
	}

	return decodeFirstRef
}
// decodeSkipNoRefs skips the SP "capabilities^{}" NUL marker that
// follows a zero-id first hash.
func decodeSkipNoRefs(d *advRefsDecoder) decoderStateFn {
	if len(d.line) < len(noHeadMark) {
		d.error("too short zero-id ref")
		return nil
	}

	if !bytes.HasPrefix(d.line, noHeadMark) {
		d.error("malformed zero-id ref")
		return nil
	}

	d.line = d.line[len(noHeadMark):]

	return decodeCaps
}
// decodeFirstRef decodes the first reference name: SP refname NUL.
// A HEAD ref is stored in Head; everything else goes into References.
func decodeFirstRef(d *advRefsDecoder) decoderStateFn {
	switch {
	case len(d.line) < 3:
		d.error("line too short after hash")
		return nil
	case !bytes.HasPrefix(d.line, sp):
		d.error("no space after hash")
		return nil
	}

	d.line = d.line[1:]

	chunks := bytes.SplitN(d.line, null, 2)
	if len(chunks) < 2 {
		d.error("NULL not found")
		return nil
	}
	name, rest := chunks[0], chunks[1]
	d.line = rest

	if string(name) == head {
		d.data.Head = &d.hash
		return decodeCaps
	}

	d.data.References[string(name)] = d.hash

	return decodeCaps
}
// decodeCaps parses the capability list that follows the first ref.
func decodeCaps(d *advRefsDecoder) decoderStateFn {
	err := d.data.Capabilities.Decode(d.line)
	if err != nil {
		d.error("invalid capabilities: %s", err)
		return nil
	}

	return decodeOtherRefs
}
// decodeOtherRefs processes the remaining refs, which are either tips
// (obj-id SP refname) or peeled (obj-id SP refname^{}). When no refs
// remain, a shallow list or a flush-pkt may follow.
func decodeOtherRefs(d *advRefsDecoder) decoderStateFn {
	if !d.nextLine() {
		return nil
	}

	switch {
	case bytes.HasPrefix(d.line, shallow):
		return decodeShallow
	case len(d.line) == 0:
		// flush-pkt: end of the refs section.
		return nil
	}

	target := d.data.References
	if bytes.HasSuffix(d.line, peeled) {
		d.line = bytes.TrimSuffix(d.line, peeled)
		target = d.data.Peeled
	}

	name, hash, err := readRef(d.line)
	if err != nil {
		d.error("%s", err)
		return nil
	}
	target[name] = hash

	return decodeOtherRefs
}
// readRef parses an "obj-id SP refname" payload, returning the ref name
// and its hash. Exactly one space must separate the two fields.
func readRef(data []byte) (string, plumbing.Hash, error) {
	chunks := bytes.Split(data, sp)
	switch len(chunks) {
	case 1:
		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
	case 2:
		return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
	default:
		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
	}
}
// decodeShallow keeps reading "shallow <hash>" lines until a flush-pkt
// is found, appending each hash to Shallows.
func decodeShallow(p *advRefsDecoder) decoderStateFn {
	if !bytes.HasPrefix(p.line, shallow) {
		// Fix: guard the slice so a malformed line shorter than the
		// "shallow " prefix cannot cause a slice-bounds panic.
		got := p.line
		if len(got) > len(shallow) {
			got = got[:len(shallow)]
		}
		p.error("malformed shallow prefix, found %q... instead", got)
		return nil
	}
	p.line = bytes.TrimPrefix(p.line, shallow)

	if len(p.line) != hashSize {
		// Pass format arguments to error directly instead of nesting
		// a fmt.Sprintf call.
		p.error("malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
			len(p.line))
		return nil
	}

	text := p.line[:hashSize]
	var h plumbing.Hash
	if _, err := hex.Decode(h[:], text); err != nil {
		p.error("invalid hash text: %s", err)
		return nil
	}

	p.data.Shallows = append(p.data.Shallows, h)

	if ok := p.nextLine(); !ok {
		return nil
	}

	if len(p.line) == 0 {
		return nil // successful parse of the advertised-refs message
	}

	return decodeShallow
}

View File

@ -0,0 +1,176 @@
package packp
import (
"bytes"
"fmt"
"io"
"sort"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// Encode writes the AdvRefs encoding to a writer.
//
// All the payloads will end with a newline character. Capabilities,
// references and shallows are written in alphabetical order, except for
// peeled references that always follow their corresponding references.
func (a *AdvRefs) Encode(w io.Writer) error {
	return newAdvRefsEncoder(w).Encode(a)
}
// advRefsEncoder is a state machine that writes an AdvRefs value as a
// sequence of pkt-lines.
type advRefsEncoder struct {
	data         *AdvRefs         // data to encode
	pe           *pktline.Encoder // where to write the encoded data
	firstRefName string           // reference name to encode in the first pkt-line (HEAD if present)
	firstRefHash plumbing.Hash    // hash referenced to encode in the first pkt-line (HEAD if present)
	sortedRefs   []string         // hash references to encode ordered by increasing order
	err          error            // sticky error
}
// newAdvRefsEncoder returns an encoder that writes pkt-lines to w.
func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
	e := &advRefsEncoder{}
	e.pe = pktline.NewEncoder(w)

	return e
}
// Encode runs the encoder state machine over v, writing the message.
// It returns the sticky error set by the state that stopped, if any.
func (e *advRefsEncoder) Encode(v *AdvRefs) error {
	e.data = v
	e.sortRefs()
	e.setFirstRef()

	state := encodePrefix
	for state != nil {
		state = state(e)
	}

	return e.err
}
// sortRefs caches the advertised reference names in increasing order.
func (e *advRefsEncoder) sortRefs() {
	if len(e.data.References) == 0 {
		return
	}

	names := make([]string, 0, len(e.data.References))
	for name := range e.data.References {
		names = append(names, name)
	}
	sort.Strings(names)
	e.sortedRefs = names
}
// setFirstRef picks the reference for the first pkt-line: HEAD when
// present, otherwise the first reference in sorted order.
func (e *advRefsEncoder) setFirstRef() {
	switch {
	case e.data.Head != nil:
		e.firstRefName = head
		e.firstRefHash = *e.data.Head
	case len(e.sortedRefs) > 0:
		name := e.sortedRefs[0]
		e.firstRefName = name
		e.firstRefHash = e.data.References[name]
	}
}
// encoderStateFn is one state of the encoder state machine: it writes
// its part of the message and returns the next state, or nil to stop.
type encoderStateFn func(*advRefsEncoder) encoderStateFn

// encodePrefix writes the stored prefix payloads, emitting a flush-pkt
// wherever pktline.Flush appears in the Prefix slice.
func encodePrefix(e *advRefsEncoder) encoderStateFn {
	for _, payload := range e.data.Prefix {
		if bytes.Equal(payload, pktline.Flush) {
			e.err = e.pe.Flush()
		} else {
			e.err = e.pe.Encodef("%s\n", string(payload))
		}
		if e.err != nil {
			return nil
		}
	}

	return encodeFirstLine
}
// encodeFirstLine adds the first pkt-line payload: head hash, head ref
// name and capabilities. If there is no HEAD, the first reference in
// increasing order is used instead; if there are no references at all,
// a zero-id line is written:
// PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list).
// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt
func encodeFirstLine(e *advRefsEncoder) encoderStateFn {
	const formatFirstLine = "%s %s\x00%s\n"

	caps := formatCaps(e.data.Capabilities)

	hash, name := e.firstRefHash.String(), e.firstRefName
	if name == "" {
		hash, name = plumbing.ZeroHash.String(), "capabilities^{}"
	}

	if e.err = e.pe.EncodeString(fmt.Sprintf(formatFirstLine, hash, name, caps)); e.err != nil {
		return nil
	}

	return encodeRefs
}
// formatCaps renders a capability list as a string; a nil list is empty.
func formatCaps(c *capability.List) string {
	if c != nil {
		return c.String()
	}

	return ""
}
// encodeRefs writes the sorted refs (hash SP refname LF), skipping the
// one already written in the first line, each followed by its peeled
// ref (hash SP refname^{} LF) when one exists.
func encodeRefs(e *advRefsEncoder) encoderStateFn {
	for _, name := range e.sortedRefs {
		if name == e.firstRefName {
			continue
		}

		hash := e.data.References[name]
		if e.err = e.pe.Encodef("%s %s\n", hash.String(), name); e.err != nil {
			return nil
		}

		peeledHash, ok := e.data.Peeled[name]
		if !ok {
			continue
		}
		if e.err = e.pe.Encodef("%s %s^{}\n", peeledHash.String(), name); e.err != nil {
			return nil
		}
	}

	return encodeShallow
}
// encodeShallow writes the sorted shallows: "shallow" SP hash LF.
func encodeShallow(e *advRefsEncoder) encoderStateFn {
	for _, hash := range sortShallows(e.data.Shallows) {
		if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil {
			return nil
		}
	}

	return encodeFlush
}
// sortShallows returns the hex form of the given hashes, sorted in
// increasing alphabetical order.
func sortShallows(c []plumbing.Hash) []string {
	// Pre-size the slice to avoid repeated growth while appending.
	ret := make([]string, 0, len(c))
	for _, h := range c {
		ret = append(ret, h.String())
	}
	sort.Strings(ret)

	return ret
}
// encodeFlush terminates the message with a flush-pkt.
func encodeFlush(e *advRefsEncoder) encoderStateFn {
	e.err = e.pe.Flush()

	return nil
}

View File

@ -0,0 +1,252 @@
// Package capability defines the server and client capabilities.
package capability
// Capability describes a server or client capability.
type Capability string

// String returns the capability name as a plain string.
func (n Capability) String() string {
	return string(n)
}
const (
	// MultiACK capability allows the server to return "ACK obj-id continue" as
	// soon as it finds a commit that it can use as a common base, between the
	// client's wants and the client's have set.
	//
	// By sending this early, the server can potentially head off the client
	// from walking any further down that particular branch of the client's
	// repository history. The client may still need to walk down other
	// branches, sending have lines for those, until the server has a
	// complete cut across the DAG, or the client has said "done".
	//
	// Without multi_ack, a client sends have lines in --date-order until
	// the server has found a common base. That means the client will send
	// have lines that are already known by the server to be common, because
	// they overlap in time with another branch that the server hasn't found
	// a common base on yet.
	//
	// For example suppose the client has commits in caps that the server
	// doesn't and the server has commits in lower case that the client
	// doesn't, as in the following diagram:
	//
	//     +---- u ---------------------- x
	//    /              +----- y
	//   /              /
	//  a -- b -- c -- d -- E -- F
	//   \
	//    +--- Q -- R -- S
	//
	// If the client wants x,y and starts out by saying have F,S, the server
	// doesn't know what F,S is. Eventually the client says "have d" and
	// the server sends "ACK d continue" to let the client know to stop
	// walking down that line (so don't send c-b-a), but it's not done yet,
	// it needs a base for x. The client keeps going with S-R-Q, until a
	// gets reached, at which point the server has a clear base and it all
	// ends.
	//
	// Without multi_ack the client would have sent that c-b-a chain anyway,
	// interleaved with S-R-Q.
	MultiACK Capability = "multi_ack"
	// MultiACKDetailed is an extension of multi_ack that permits client to
	// better understand the server's in-memory state.
	MultiACKDetailed Capability = "multi_ack_detailed"
	// NoDone should only be used with the smart HTTP protocol. If
	// multi_ack_detailed and no-done are both present, then the sender is
	// free to immediately send a pack following its first "ACK obj-id ready"
	// message.
	//
	// Without no-done in the smart HTTP protocol, the server session would
	// end and the client has to make another trip to send "done" before
	// the server can send the pack. no-done removes the last round and
	// thus slightly reduces latency.
	NoDone Capability = "no-done"
	// ThinPack is one with deltas which reference base objects not
	// contained within the pack (but are known to exist at the receiving
	// end). This can reduce the network traffic significantly, but it
	// requires the receiving end to know how to "thicken" these packs by
	// adding the missing bases to the pack.
	//
	// The upload-pack server advertises 'thin-pack' when it can generate
	// and send a thin pack. A client requests the 'thin-pack' capability
	// when it understands how to "thicken" it, notifying the server that
	// it can receive such a pack. A client MUST NOT request the
	// 'thin-pack' capability if it cannot turn a thin pack into a
	// self-contained pack.
	//
	// Receive-pack, on the other hand, is assumed by default to be able to
	// handle thin packs, but can ask the client not to use the feature by
	// advertising the 'no-thin' capability. A client MUST NOT send a thin
	// pack if the server advertises the 'no-thin' capability.
	//
	// The reasons for this asymmetry are historical. The receive-pack
	// program did not exist until after the invention of thin packs, so
	// historically the reference implementation of receive-pack always
	// understood thin packs. Adding 'no-thin' later allowed receive-pack
	// to disable the feature in a backwards-compatible manner.
	ThinPack Capability = "thin-pack"
	// Sideband means that server can send, and client understand multiplexed
	// progress reports and error info interleaved with the packfile itself.
	//
	// These two options are mutually exclusive. A modern client always
	// favors Sideband64k.
	//
	// Either mode indicates that the packfile data will be streamed broken
	// up into packets of up to either 1000 bytes in the case of 'side_band',
	// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
	// of a leading 4-byte pkt-line length of how much data is in the packet,
	// followed by a 1-byte stream code, followed by the actual data.
	//
	// The stream code can be one of:
	//
	//  1 - pack data
	//  2 - progress messages
	//  3 - fatal error message just before stream aborts
	//
	// The "side-band-64k" capability came about as a way for newer clients
	// that can handle much larger packets to request packets that are
	// actually crammed nearly full, while maintaining backward compatibility
	// for the older clients.
	//
	// Further, with side-band and its up to 1000-byte messages, it's actually
	// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
	// same deal, you have up to 65519 bytes of data and 1 byte for the stream
	// code.
	//
	// The client MUST send only maximum of one of "side-band" and "side-
	// band-64k". Server MUST diagnose it as an error if client requests
	// both.
	Sideband    Capability = "side-band"
	Sideband64k Capability = "side-band-64k"
	// OFSDelta server can send, and client understand PACKv2 with delta
	// referring to its base by position in pack rather than by an obj-id. That
	// is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile.
	OFSDelta Capability = "ofs-delta"
	// Agent the server may optionally send this capability to notify the client
	// that the server is running version `X`. The client may optionally return
	// its own agent string by responding with an `agent=Y` capability (but it
	// MUST NOT do so if the server did not mention the agent capability). The
	// `X` and `Y` strings may contain any printable ASCII characters except
	// space (i.e., the byte range 32 < x < 127), and are typically of the form
	// "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely
	// informative for statistics and debugging purposes, and MUST NOT be used
	// to programmatically assume the presence or absence of particular features.
	Agent Capability = "agent"
	// Shallow capability adds "deepen", "shallow" and "unshallow" commands to
	// the fetch-pack/upload-pack protocol so clients can request shallow
	// clones.
	Shallow Capability = "shallow"
	// DeepenSince adds "deepen-since" command to fetch-pack/upload-pack
	// protocol so the client can request shallow clones that are cut at a
	// specific time, instead of depth. Internally it's equivalent of doing
	// "rev-list --max-age=<timestamp>" on the server side. "deepen-since"
	// cannot be used with "deepen".
	DeepenSince Capability = "deepen-since"
	// DeepenNot adds "deepen-not" command to fetch-pack/upload-pack
	// protocol so the client can request shallow clones that are cut at a
	// specific revision, instead of depth. Internally it's equivalent of
	// doing "rev-list --not <rev>" on the server side. "deepen-not"
	// cannot be used with "deepen", but can be used with "deepen-since".
	DeepenNot Capability = "deepen-not"
	// DeepenRelative if this capability is requested by the client, the
	// semantics of "deepen" command is changed. The "depth" argument is the
	// depth from the current shallow boundary, instead of the depth from
	// remote refs.
	DeepenRelative Capability = "deepen-relative"
	// NoProgress the client was started with "git clone -q" or something, and
	// doesn't want that side band 2. Basically the client just says "I do not
	// wish to receive stream 2 on sideband, so do not send it to me, and if
	// you did, I will drop it on the floor anyway". However, the sideband
	// channel 3 is still used for error responses.
	NoProgress Capability = "no-progress"
	// IncludeTag capability is about sending annotated tags if we are
	// sending objects they point to. If we pack an object to the client, and
	// a tag object points exactly at that object, we pack the tag object too.
	// In general this allows a client to get all new annotated tags when it
	// fetches a branch, in a single network connection.
	//
	// Clients MAY always send include-tag, hardcoding it into a request when
	// the server advertises this capability. The decision for a client to
	// request include-tag only has to do with the client's desires for tag
	// data, whether or not a server had advertised objects in the
	// refs/tags/* namespace.
	//
	// Servers MUST pack the tags if their referent is packed and the client
	// has requested include-tags.
	//
	// Clients MUST be prepared for the case where a server has ignored
	// include-tag and has not actually sent tags in the pack. In such
	// cases the client SHOULD issue a subsequent fetch to acquire the tags
	// that include-tag would have otherwise given the client.
	//
	// The server SHOULD send include-tag, if it supports it, regardless
	// of whether or not there are tags available.
	IncludeTag Capability = "include-tag"
	// ReportStatus the receive-pack process can receive a 'report-status'
	// capability, which tells it that the client wants a report of what
	// happened after a packfile upload and reference update. If the pushing
	// client requests this capability, after unpacking and updating references
	// the server will respond with whether the packfile unpacked successfully
	// and if each reference was updated successfully. If any of those were not
	// successful, it will send back an error message. See pack-protocol.txt
	// for example messages.
	ReportStatus Capability = "report-status"
	// DeleteRefs If the server sends back this capability, it means that
	// it is capable of accepting a zero-id value as the target
	// value of a reference update. It is not sent back by the client, it
	// simply informs the client that it can be sent zero-id values
	// to delete references
	DeleteRefs Capability = "delete-refs"
	// Quiet If the receive-pack server advertises this capability, it is
	// capable of silencing human-readable progress output which otherwise may
	// be shown when processing the received pack. A send-pack client should
	// respond with the 'quiet' capability to suppress server-side progress
	// reporting if the local progress reporting is also being suppressed
	// (e.g., via `push -q`, or if stderr does not go to a tty).
	Quiet Capability = "quiet"
	// Atomic If the server sends this capability it is capable of accepting
	// atomic pushes. If the pushing client requests this capability, the server
	// will update the refs in one atomic transaction. Either all refs are
	// updated or none.
	Atomic Capability = "atomic"
	// PushOptions If the server sends this capability it is able to accept
	// push options after the update commands have been sent, but before the
	// packfile is streamed. If the pushing client requests this capability,
	// the server will pass the options to the pre- and post- receive hooks
	// that process this push request.
	PushOptions Capability = "push-options"
	// AllowTipSHA1InWant if the upload-pack server advertises this capability,
	// fetch-pack may send "want" lines with SHA-1s that exist at the server but
	// are not advertised by upload-pack.
	AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
	// AllowReachableSHA1InWant if the upload-pack server advertises this
	// capability, fetch-pack may send "want" lines with SHA-1s that exist at
	// the server but are not advertised by upload-pack.
	AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
	// PushCert the receive-pack server that advertises this capability is
	// willing to accept a signed push certificate, and asks the <nonce> to be
	// included in the push certificate. A send-pack client MUST NOT
	// send a push-cert packet unless the receive-pack server advertises
	// this capability.
	PushCert Capability = "push-cert"
	// SymRef symbolic reference support for better negotiation.
	SymRef Capability = "symref"
)
// DefaultAgent is the agent string advertised by this library.
const DefaultAgent = "go-git/4.x"

// known lists every capability understood by this package.
var known = map[Capability]bool{
	MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
	Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
	Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
	NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
	Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
	AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
}

// requiresArgument lists the known capabilities that must carry a value.
var requiresArgument = map[Capability]bool{
	Agent: true, PushCert: true, SymRef: true,
}

// multipleArgument lists the known capabilities that may carry several values.
var multipleArgument = map[Capability]bool{
	SymRef: true,
}

View File

@ -0,0 +1,196 @@
package capability
import (
"bytes"
"errors"
"fmt"
"strings"
)
var (
	// ErrArgumentsRequired is returned if no arguments are given with a
	// capability that requires arguments
	ErrArgumentsRequired = errors.New("arguments required")
	// ErrArguments is returned if arguments are given with a capability that
	// does not support arguments
	ErrArguments = errors.New("arguments not allowed")
	// ErrEmtpyArgument is returned when an empty value is given
	// (NOTE: the misspelled name is part of the public API and cannot be
	// renamed without breaking callers)
	ErrEmtpyArgument = errors.New("empty argument")
	// ErrMultipleArguments is returned when multiple arguments are given to
	// a capability that does not support them
	ErrMultipleArguments = errors.New("multiple arguments not allowed")
)
// List represents a list of capabilities
type List struct {
	// m maps each capability to its entry (name plus values).
	m map[Capability]*entry
	// sort keeps the capability names in insertion order, for encoding.
	sort []string
}

// entry holds a capability name and its associated values, if any.
type entry struct {
	Name   Capability
	Values []string
}
// NewList returns a new List of capabilities
func NewList() *List {
	l := &List{}
	l.m = map[Capability]*entry{}

	return l
}

// IsEmpty returns true if the List is empty
func (l *List) IsEmpty() bool {
	return len(l.sort) == 0
}
// Decode decodes a raw, space-separated list of capabilities into l.
func (l *List) Decode(raw []byte) error {
	// git 1.x receive-pack used to send a leading space on its
	// capabilities announcement; trimming makes us tolerant to
	// whitespace changes between versions.
	raw = bytes.TrimSpace(raw)
	if len(raw) == 0 {
		return nil
	}

	for _, data := range bytes.Split(raw, []byte{' '}) {
		pair := bytes.SplitN(data, []byte{'='}, 2)

		var err error
		if len(pair) == 2 {
			err = l.Add(Capability(pair[0]), string(pair[1]))
		} else {
			err = l.Add(Capability(pair[0]))
		}
		if err != nil {
			return err
		}
	}

	return nil
}
// Get returns the values for a capability
func (l *List) Get(capability Capability) []string {
	e, ok := l.m[capability]
	if !ok {
		return nil
	}

	return e.Values
}
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
	// Fix: reset the stored values instead of deleting the map entry.
	// Deleting made the following Add re-append the capability name to
	// l.sort, so a capability set twice was encoded twice by String().
	if e, ok := l.m[capability]; ok {
		e.Values = e.Values[:0]
	}
	return l.Add(capability, values...)
}
// Add adds a capability, values are optional.
//
// The values are validated first, so an invalid call leaves the list
// untouched. A capability not yet present is registered in both the map
// and the insertion-order slice before any values are appended.
func (l *List) Add(c Capability, values ...string) error {
	if err := l.validate(c, values); err != nil {
		return err
	}

	if !l.Supports(c) {
		l.m[c] = &entry{Name: c}
		l.sort = append(l.sort, c.String())
	}

	if len(values) == 0 {
		return nil
	}

	// A known capability that takes a single argument cannot accumulate
	// more values once one is stored.
	if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 {
		return ErrMultipleArguments
	}

	l.m[c].Values = append(l.m[c].Values, values...)

	return nil
}
// validateNoEmptyArgs returns ErrEmtpyArgument when any value is empty.
func (l *List) validateNoEmptyArgs(values []string) error {
	for _, v := range values {
		if len(v) == 0 {
			return ErrEmtpyArgument
		}
	}

	return nil
}
// validate checks values against the rules for capability c: unknown
// capabilities only need non-empty values, while known ones must honor
// their declared argument arity.
func (l *List) validate(c Capability, values []string) error {
	if !known[c] {
		return l.validateNoEmptyArgs(values)
	}

	switch {
	case requiresArgument[c] && len(values) == 0:
		return ErrArgumentsRequired
	case !requiresArgument[c] && len(values) != 0:
		return ErrArguments
	case !multipleArgument[c] && len(values) > 1:
		return ErrMultipleArguments
	}

	return l.validateNoEmptyArgs(values)
}
// Supports returns true if capability is present
func (l *List) Supports(capability Capability) bool {
	_, ok := l.m[capability]

	return ok
}
// Delete deletes a capability from the List
func (l *List) Delete(capability Capability) {
	if !l.Supports(capability) {
		return
	}

	delete(l.m, capability)

	// Also drop the name from the insertion-order slice.
	name := string(capability)
	for i, c := range l.sort {
		if c == name {
			l.sort = append(l.sort[:i], l.sort[i+1:]...)
			return
		}
	}
}
// All returns a slice with all defined capabilities.
func (l *List) All() []Capability {
	var cs []Capability
	for _, name := range l.sort {
		cs = append(cs, Capability(name))
	}

	return cs
}
// String generates the capabilities strings, the capabilities are sorted in
// insertion order
func (l *List) String() string {
	var parts []string
	for _, name := range l.sort {
		e := l.m[Capability(name)]

		if len(e.Values) == 0 {
			parts = append(parts, name)
			continue
		}

		for _, value := range e.Values {
			parts = append(parts, name+"="+value)
		}
	}

	return strings.Join(parts, " ")
}

View File

@ -0,0 +1,70 @@
package packp
import (
"fmt"
)
// stateFn is a step of a simple state machine used while decoding pktline
// streams: each state returns the next state, or nil to stop.
type stateFn func() stateFn

const (
	// common
	hashSize = 40 // length of a hex-encoded object id

	// advrefs
	head   = "HEAD"
	noHead = "capabilities^{}" // placeholder refname used when there are no refs
)

var (
	// common
	sp  = []byte(" ")
	eol = []byte("\n")
	eq  = []byte{'='}

	// advertised-refs
	null       = []byte("\x00")
	peeled     = []byte("^{}")
	noHeadMark = []byte(" capabilities^{}\x00")

	// upload-request
	want            = []byte("want ")
	shallow         = []byte("shallow ")
	deepen          = []byte("deepen")
	deepenCommits   = []byte("deepen ")
	deepenSince     = []byte("deepen-since ")
	deepenReference = []byte("deepen-not ")

	// shallow-update
	unshallow = []byte("unshallow ")

	// server-response
	ack = []byte("ACK")
	nak = []byte("NAK")

	// updreq
	shallowNoSp = []byte("shallow")
)
// isFlush reports whether the given pkt-line payload is a flush-pkt,
// i.e. a zero-length payload.
func isFlush(payload []byte) bool {
	if len(payload) > 0 {
		return false
	}
	return true
}
// ErrUnexpectedData represents an unexpected data decoding a message
type ErrUnexpectedData struct {
	Msg  string
	Data []byte
}

// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and
// the message given
func NewErrUnexpectedData(msg string, data []byte) error {
	return &ErrUnexpectedData{Msg: msg, Data: data}
}

// Error implements the error interface, appending the offending data to the
// message when there is any.
func (err *ErrUnexpectedData) Error() string {
	if len(err.Data) > 0 {
		return fmt.Sprintf("%s (%s)", err.Msg, err.Data)
	}
	return err.Msg
}

View File

@ -0,0 +1,724 @@
package packp
/*
A nice way to trace the real data transmitted and received by git, use:
GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git
GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git
Here follows a copy of the current protocol specification at the time of
this writing.
(Please notice that most http git servers will add a flush-pkt after the
first pkt-line when using HTTP smart.)
Documentation Common to Pack and Http Protocols
===============================================
ABNF Notation
-------------
ABNF notation as described by RFC 5234 is used within the protocol documents,
except the following replacement core rules are used:
----
HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f"
----
We also define the following common rules:
----
NUL = %x00
zero-id = 40*"0"
obj-id = 40*(HEXDIGIT)
refname = "HEAD"
refname /= "refs/" <see discussion below>
----
A refname is a hierarchical octet string beginning with "refs/" and
not violating the 'git-check-ref-format' command's validation rules.
More specifically, they:
. They can include slash `/` for hierarchical (directory)
grouping, but no slash-separated component can begin with a
dot `.`.
. They must contain at least one `/`. This enforces the presence of a
category like `heads/`, `tags/` etc. but the actual names are not
restricted.
. They cannot have two consecutive dots `..` anywhere.
. They cannot have ASCII control characters (i.e. bytes whose
values are lower than \040, or \177 `DEL`), space, tilde `~`,
caret `^`, colon `:`, question-mark `?`, asterisk `*`,
or open bracket `[` anywhere.
. They cannot end with a slash `/` or a dot `.`.
. They cannot end with the sequence `.lock`.
. They cannot contain a sequence `@{`.
. They cannot contain a `\\`.
pkt-line Format
---------------
Much (but not all) of the payload is described around pkt-lines.
A pkt-line is a variable length binary string. The first four bytes
of the line, the pkt-len, indicates the total length of the line,
in hexadecimal. The pkt-len includes the 4 bytes used to contain
the length's hexadecimal representation.
A pkt-line MAY contain binary data, so implementors MUST ensure
pkt-line parsing/formatting routines are 8-bit clean.
A non-binary line SHOULD BE terminated by an LF, which if present
MUST be included in the total length. Receivers MUST treat pkt-lines
with non-binary data the same whether or not they contain the trailing
LF (stripping the LF if present, and not complaining when it is
missing).
The maximum length of a pkt-line's data component is 65516 bytes.
Implementations MUST NOT send pkt-line whose length exceeds 65520
(65516 bytes of payload + 4 bytes of length data).
Implementations SHOULD NOT send an empty pkt-line ("0004").
A pkt-line with a length field of 0 ("0000"), called a flush-pkt,
is a special case and MUST be handled differently than an empty
pkt-line ("0004").
----
pkt-line = data-pkt / flush-pkt
data-pkt = pkt-len pkt-payload
pkt-len = 4*(HEXDIG)
pkt-payload = (pkt-len - 4)*(OCTET)
flush-pkt = "0000"
----
Examples (as C-style strings):
----
pkt-line actual value
---------------------------------
"0006a\n" "a\n"
"0005a" "a"
"000bfoobar\n" "foobar\n"
"0004" ""
----
Packfile transfer protocols
===========================
Git supports transferring data in packfiles over the ssh://, git://, http:// and
file:// transports. There exist two sets of protocols, one for pushing
data from a client to a server and another for fetching data from a
server to a client. The three transports (ssh, git, file) use the same
protocol to transfer data. http is documented in http-protocol.txt.
The processes invoked in the canonical Git implementation are 'upload-pack'
on the server side and 'fetch-pack' on the client side for fetching data;
then 'receive-pack' on the server and 'send-pack' on the client for pushing
data. The protocol functions to have a server tell a client what is
currently on the server, then for the two to negotiate the smallest amount
of data to send in order to fully update one or the other.
pkt-line Format
---------------
The descriptions below build on the pkt-line format described in
protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless
otherwise noted the usual pkt-line LF rules apply: the sender SHOULD
include a LF, but the receiver MUST NOT complain if it is not present.
Transports
----------
There are three transports over which the packfile protocol is
initiated. The Git transport is a simple, unauthenticated server that
takes the command (almost always 'upload-pack', though Git
servers can be configured to be globally writable, in which case 'receive-
pack' initiation is also allowed) with which the client wishes to
communicate and executes it and connects it to the requesting
process.
In the SSH transport, the client just runs the 'upload-pack'
or 'receive-pack' process on the server over the SSH protocol and then
communicates with that invoked process over the SSH connection.
The file:// transport runs the 'upload-pack' or 'receive-pack'
process locally and communicates with it over a pipe.
Git Transport
-------------
The Git transport starts off by sending the command and repository
on the wire using the pkt-line format, followed by a NUL byte and a
hostname parameter, terminated by a NUL byte.
0032git-upload-pack /project.git\0host=myserver.com\0
--
git-proto-request = request-command SP pathname NUL [ host-parameter NUL ]
request-command = "git-upload-pack" / "git-receive-pack" /
"git-upload-archive" ; case sensitive
pathname = *( %x01-ff ) ; exclude NUL
host-parameter = "host=" hostname [ ":" port ]
--
Only host-parameter is allowed in the git-proto-request. Clients
MUST NOT attempt to send additional parameters. It is used for the
git-daemon name based virtual hosting. See --interpolated-path
option to git daemon, with the %H/%CH format characters.
Basically what the Git client is doing to connect to an 'upload-pack'
process on the server side over the Git protocol is this:
$ echo -e -n \
"0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
nc -v example.com 9418
If the server refuses the request for some reasons, it could abort
gracefully with an error message.
----
error-line = PKT-LINE("ERR" SP explanation-text)
----
SSH Transport
-------------
Initiating the upload-pack or receive-pack processes over SSH is
executing the binary on the server via SSH remote execution.
It is basically equivalent to running this:
$ ssh git.example.com "git-upload-pack '/project.git'"
For a server to support Git pushing and pulling for a given user over
SSH, that user needs to be able to execute one or both of those
commands via the SSH shell that they are provided on login. On some
systems, that shell access is limited to only being able to run those
two commands, or even just one of them.
In an ssh:// format URI, it's absolute in the URI, so the '/' after
the host name (or port number) is sent as an argument, which is then
read by the remote git-upload-pack exactly as is, so it's effectively
an absolute path in the remote filesystem.
git clone ssh://user@example.com/project.git
|
v
ssh user@example.com "git-upload-pack '/project.git'"
In a "user@host:path" format URI, its relative to the user's home
directory, because the Git client will run:
git clone user@example.com:project.git
|
v
ssh user@example.com "git-upload-pack 'project.git'"
The exception is if a '~' is used, in which case
we execute it without the leading '/'.
ssh://user@example.com/~alice/project.git,
|
v
ssh user@example.com "git-upload-pack '~alice/project.git'"
A few things to remember here:
- The "command name" is spelled with dash (e.g. git-upload-pack), but
this can be overridden by the client;
- The repository path is always quoted with single quotes.
Fetching Data From a Server
---------------------------
When one Git repository wants to get data that a second repository
has, the first can 'fetch' from the second. This operation determines
what data the server has that the client does not, then streams that
data down to the client in packfile format.
Reference Discovery
-------------------
When the client initially connects the server will immediately respond
with a listing of each reference it has (all branches and tags) along
with the object name that each reference currently points to.
$ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" |
nc -v example.com 9418
00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack
side-band side-band-64k ofs-delta shallow no-progress include-tag
00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration
003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master
003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9
003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0
003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{}
0000
The returned response is a pkt-line stream describing each ref and
its current value. The stream MUST be sorted by name according to
the C locale ordering.
If HEAD is a valid ref, HEAD MUST appear as the first advertised
ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the
advertisement list at all, but other refs may still appear.
The stream MUST include capability declarations behind a NUL on the
first ref. The peeled value of a ref (that is "ref^{}") MUST be
immediately after the ref itself, if presented. A conforming server
MUST peel the ref if it's an annotated tag.
----
advertised-refs = (no-refs / list-of-refs)
*shallow
flush-pkt
no-refs = PKT-LINE(zero-id SP "capabilities^{}"
NUL capability-list)
list-of-refs = first-ref *other-ref
first-ref = PKT-LINE(obj-id SP refname
NUL capability-list)
other-ref = PKT-LINE(other-tip / other-peeled)
other-tip = obj-id SP refname
other-peeled = obj-id SP refname "^{}"
shallow = PKT-LINE("shallow" SP obj-id)
capability-list = capability *(SP capability)
capability = 1*(LC_ALPHA / DIGIT / "-" / "_")
LC_ALPHA = %x61-7A
----
Server and client MUST use lowercase for obj-id, both MUST treat obj-id
as case-insensitive.
See protocol-capabilities.txt for a list of allowed server capabilities
and descriptions.
Packfile Negotiation
--------------------
After reference and capabilities discovery, the client can decide to
terminate the connection by sending a flush-pkt, telling the server it can
now gracefully terminate, and disconnect, when it does not need any pack
data. This can happen with the ls-remote command, and also can happen when
the client already is up-to-date.
Otherwise, it enters the negotiation phase, where the client and
server determine what the minimal packfile necessary for transport is,
by telling the server what objects it wants, its shallow objects
(if any), and the maximum commit depth it wants (if any). The client
will also send a list of the capabilities it wants to be in effect,
out of what the server said it could do with the first 'want' line.
----
upload-request = want-list
*shallow-line
*1depth-request
flush-pkt
want-list = first-want
*additional-want
shallow-line = PKT-LINE("shallow" SP obj-id)
depth-request = PKT-LINE("deepen" SP depth) /
PKT-LINE("deepen-since" SP timestamp) /
PKT-LINE("deepen-not" SP ref)
first-want = PKT-LINE("want" SP obj-id SP capability-list)
additional-want = PKT-LINE("want" SP obj-id)
depth = 1*DIGIT
----
Clients MUST send all the obj-ids it wants from the reference
discovery phase as 'want' lines. Clients MUST send at least one
'want' command in the request body. Clients MUST NOT mention an
obj-id in a 'want' command which did not appear in the response
obtained through ref discovery.
The client MUST write all obj-ids which it only has shallow copies
of (meaning that it does not have the parents of a commit) as
'shallow' lines so that the server is aware of the limitations of
the client's history.
The client now sends the maximum commit history depth it wants for
this transaction, which is the number of commits it wants from the
tip of the history, if any, as a 'deepen' line. A depth of 0 is the
same as not making a depth request. The client does not want to receive
any commits beyond this depth, nor does it want objects needed only to
complete those commits. Commits whose parents are not received as a
result are defined as shallow and marked as such in the server. This
information is sent back to the client in the next step.
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
that it is done sending the list.
Otherwise, if the client sent a positive depth request, the server
will determine which commits will and will not be shallow and
send this information to the client. If the client did not request
a positive depth, this step is skipped.
----
shallow-update = *shallow-line
*unshallow-line
flush-pkt
shallow-line = PKT-LINE("shallow" SP obj-id)
unshallow-line = PKT-LINE("unshallow" SP obj-id)
----
If the client has requested a positive depth, the server will compute
the set of commits which are no deeper than the desired depth. The set
of commits start at the client's wants.
The server writes 'shallow' lines for each
commit whose parents will not be sent as a result. The server writes
an 'unshallow' line for each commit which the client has indicated is
shallow, but is no longer shallow at the currently requested depth
(that is, its parents will now be sent). The server MUST NOT mark
as unshallow anything which the client has not indicated was shallow.
Now the client will send a list of the obj-ids it has using 'have'
lines, so the server can make a packfile that only contains the objects
that the client needs. In multi_ack mode, the canonical implementation
will send up to 32 of these at a time, then will send a flush-pkt. The
canonical implementation will skip ahead and send the next 32 immediately,
so that there is always a block of 32 "in-flight on the wire" at a time.
----
upload-haves = have-list
compute-end
have-list = *have-line
have-line = PKT-LINE("have" SP obj-id)
compute-end = flush-pkt / PKT-LINE("done")
----
If the server reads 'have' lines, it then will respond by ACKing any
of the obj-ids the client said it had that the server also has. The
server will ACK obj-ids differently depending on which ack mode is
chosen by the client.
In multi_ack mode:
* the server will respond with 'ACK obj-id continue' for any common
commits.
* once the server has found an acceptable common base commit and is
ready to make a packfile, it will blindly ACK all 'have' obj-ids
back to the client.
* the server will then send a 'NAK' and then wait for another response
from the client - either a 'done' or another list of 'have' lines.
In multi_ack_detailed mode:
* the server will differentiate the ACKs where it is signaling
that it is ready to send data with 'ACK obj-id ready' lines, and
signals the identified common commits with 'ACK obj-id common' lines.
Without either multi_ack or multi_ack_detailed:
* upload-pack sends "ACK obj-id" on the first common object it finds.
After that it says nothing until the client gives it a "done".
* upload-pack sends "NAK" on a flush-pkt if no common object
has been found yet. If one has been found, and thus an ACK
was already sent, it's silent on the flush-pkt.
After the client has gotten enough ACK responses that it can determine
that the server has enough information to send an efficient packfile
(in the canonical implementation, this is determined when it has received
enough ACKs that it can color everything left in the --date-order queue
as common with the server, or the --date-order queue is empty), or the
client determines that it wants to give up (in the canonical implementation,
this is determined when the client sends 256 'have' lines without getting
any of them ACKed by the server - meaning there is nothing in common and
the server should just send all of its objects), then the client will send
a 'done' command. The 'done' command signals to the server that the client
is ready to receive its packfile data.
However, the 256 limit *only* turns on in the canonical client
implementation if we have received at least one "ACK %s continue"
during a prior round. This helps to ensure that at least one common
ancestor is found before we give up entirely.
Once the 'done' line is read from the client, the server will either
send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object
name of the last commit determined to be common. The server only sends
ACK after 'done' if there is at least one common base and multi_ack or
multi_ack_detailed is enabled. The server always sends NAK after 'done'
if there is no common base found.
Then the server will start sending its packfile data.
----
server-response = *ack_multi ack / nak
ack_multi = PKT-LINE("ACK" SP obj-id ack_status)
ack_status = "continue" / "common" / "ready"
ack = PKT-LINE("ACK" SP obj-id)
nak = PKT-LINE("NAK")
----
A simple clone may look like this (with no 'have' lines):
----
C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
side-band-64k ofs-delta\n
C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
C: 0000
C: 0009done\n
S: 0008NAK\n
S: [PACKFILE]
----
An incremental update (fetch) response might look like this:
----
C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
side-band-64k ofs-delta\n
C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
C: 0000
C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
C: [30 more have lines]
C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
C: 0000
S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
S: 0008NAK\n
C: 0009done\n
S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
S: [PACKFILE]
----
Packfile Data
-------------
Now that the client and server have finished negotiation about what
the minimal amount of data that needs to be sent to the client is, the server
will construct and send the required data in packfile format.
See pack-format.txt for what the packfile itself actually looks like.
If 'side-band' or 'side-band-64k' capabilities have been specified by
the client, the server will send the packfile data multiplexed.
Each packet starting with the packet-line length of the amount of data
that follows, followed by a single byte specifying the sideband the
following data is coming in on.
In 'side-band' mode, it will send up to 999 data bytes plus 1 control
code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
mode it will send up to 65519 data bytes plus 1 control code, for a
total of up to 65520 bytes in a pkt-line.
The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
packfile data, sideband '2' will be used for progress information that the
client will generally print to stderr and sideband '3' is used for error
information.
If no 'side-band' capability was specified, the server will stream the
entire packfile without multiplexing.
Pushing Data To a Server
------------------------
Pushing data to a server will invoke the 'receive-pack' process on the
server, which will allow the client to tell it which references it should
update and then send all the data the server will need for those new
references to be complete. Once all the data is received and validated,
the server will then update its references to what the client specified.
Authentication
--------------
The protocol itself contains no authentication mechanisms. That is to be
handled by the transport, such as SSH, before the 'receive-pack' process is
invoked. If 'receive-pack' is configured over the Git transport, those
repositories will be writable by anyone who can access that port (9418) as
that transport is unauthenticated.
Reference Discovery
-------------------
The reference discovery phase is done nearly the same way as it is in the
fetching protocol. Each reference obj-id and name on the server is sent
in packet-line format to the client, followed by a flush-pkt. The only
real difference is that the capability listing is different - the only
possible values are 'report-status', 'delete-refs', 'ofs-delta' and
'push-options'.
Reference Update Request and Packfile Transfer
----------------------------------------------
Once the client knows what references the server is at, it can send a
list of reference update requests. For each reference on the server
that it wants to update, it sends a line listing the obj-id currently on
the server, the obj-id the client would like to update it to and the name
of the reference.
This list is followed by a flush-pkt. Then the push options are transmitted
one per packet followed by another flush-pkt. After that the packfile that
should contain all the objects that the server will need to complete the new
references will be sent.
----
update-request = *shallow ( command-list | push-cert ) [packfile]
shallow = PKT-LINE("shallow" SP obj-id)
command-list = PKT-LINE(command NUL capability-list)
*PKT-LINE(command)
flush-pkt
command = create / delete / update
create = zero-id SP new-id SP name
delete = old-id SP zero-id SP name
update = old-id SP new-id SP name
old-id = obj-id
new-id = obj-id
push-cert = PKT-LINE("push-cert" NUL capability-list LF)
PKT-LINE("certificate version 0.1" LF)
PKT-LINE("pusher" SP ident LF)
PKT-LINE("pushee" SP url LF)
PKT-LINE("nonce" SP nonce LF)
PKT-LINE(LF)
*PKT-LINE(command LF)
*PKT-LINE(gpg-signature-lines LF)
PKT-LINE("push-cert-end" LF)
packfile = "PACK" 28*(OCTET)
----
If the receiving end does not support delete-refs, the sending end MUST
NOT ask for delete command.
If the receiving end does not support push-cert, the sending end
MUST NOT send a push-cert command. When a push-cert command is
sent, command-list MUST NOT be sent; the commands recorded in the
push certificate is used instead.
The packfile MUST NOT be sent if the only command used is 'delete'.
A packfile MUST be sent if either create or update command is used,
even if the server already has all the necessary objects. In this
case the client MUST send an empty packfile. The only time this
is likely to happen is if the client is creating
a new branch or a tag that points to an existing obj-id.
The server will receive the packfile, unpack it, then validate each
reference that is being updated that it hasn't changed while the request
was being processed (the obj-id is still the same as the old-id), and
it will run any update hooks to make sure that the update is acceptable.
If all of that is fine, the server will then update the references.
Push Certificate
----------------
A push certificate begins with a set of header lines. After the
header and an empty line, the protocol commands follow, one per
line. Note that the trailing LF in push-cert PKT-LINEs is _not_
optional; it must be present.
Currently, the following header fields are defined:
`pusher` ident::
Identify the GPG key in "Human Readable Name <email@address>"
format.
`pushee` url::
The repository URL (anonymized, if the URL contains
authentication material) the user who ran `git push`
intended to push into.
`nonce` nonce::
The 'nonce' string the receiving repository asked the
pushing user to include in the certificate, to prevent
replay attacks.
The GPG signature lines are a detached signature for the contents
recorded in the push certificate before the signature block begins.
The detached signature is used to certify that the commands were
given by the pusher, who must be the signer.
Report Status
-------------
After receiving the pack data from the sender, the receiver sends a
report if 'report-status' capability is in effect.
It is a short listing of what happened in that update. It will first
list the status of the packfile unpacking as either 'unpack ok' or
'unpack [error]'. Then it will list the status for each of the references
that it tried to update. Each line is either 'ok [refname]' if the
update was successful, or 'ng [refname] [error]' if the update was not.
----
report-status = unpack-status
1*(command-status)
flush-pkt
unpack-status = PKT-LINE("unpack" SP unpack-result)
unpack-result = "ok" / error-msg
command-status = command-ok / command-fail
command-ok = PKT-LINE("ok" SP refname)
command-fail = PKT-LINE("ng" SP refname SP error-msg)
error-msg = 1*(OCTET) ; where not "ok"
----
Updates can be unsuccessful for a number of reasons. The reference can have
changed since the reference discovery phase was originally sent, meaning
someone pushed in the meantime. The reference being pushed could be a
non-fast-forward reference and the update hooks or configuration could be
set to not allow that, etc. Also, some references can be updated while others
can be rejected.
An example client/server communication might look like this:
----
S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
S: 0000
C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
C: 0000
C: [PACKDATA]
S: 000eunpack ok\n
S: 0018ok refs/heads/debug\n
S: 002ang refs/heads/master non-fast-forward\n
----
*/

View File

@ -0,0 +1,165 @@
package packp
import (
"bytes"
"fmt"
"io"
"strings"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
const (
	// ok is the status token reported for a successful unpack or
	// reference update.
	ok = "ok"
)
// ReportStatus is a report status message, as used in the git-receive-pack
// process whenever the 'report-status' capability is negotiated.
type ReportStatus struct {
	// UnpackStatus is "ok" on success, or an error message otherwise.
	UnpackStatus string
	// CommandStatuses holds the result of each reference update.
	CommandStatuses []*CommandStatus
}
// NewReportStatus creates a new ReportStatus message.
func NewReportStatus() *ReportStatus {
	var rs ReportStatus
	return &rs
}
// Error returns the first error if any.
//
// The unpack status is checked first; if it is not "ok" an unpack error is
// returned. Otherwise the command statuses are checked in order and the
// first failing one is returned.
func (s *ReportStatus) Error() error {
	if s.UnpackStatus != ok {
		return fmt.Errorf("unpack error: %s", s.UnpackStatus)
	}
	// Renamed the loop variable from s to cs: it shadowed the method
	// receiver.
	for _, cs := range s.CommandStatuses {
		if err := cs.Error(); err != nil {
			return err
		}
	}
	return nil
}
// Encode writes the report status to a writer.
func (s *ReportStatus) Encode(w io.Writer) error {
	enc := pktline.NewEncoder(w)
	// The unpack status always comes first.
	err := enc.Encodef("unpack %s\n", s.UnpackStatus)
	if err != nil {
		return err
	}
	// Then one line per reference update.
	for _, status := range s.CommandStatuses {
		if err = status.encode(w); err != nil {
			return err
		}
	}
	return enc.Flush()
}
// Decode reads from the given reader and decodes a report-status message. It
// does not read more input than what is needed to fill the report status.
func (s *ReportStatus) Decode(r io.Reader) error {
	scan := pktline.NewScanner(r)
	if err := s.scanFirstLine(scan); err != nil {
		return err
	}
	// The first pkt-line must be the unpack status.
	if err := s.decodeReportStatus(scan.Bytes()); err != nil {
		return err
	}
	// Then command statuses until the terminating flush-pkt.
	sawFlush := false
	for !sawFlush && scan.Scan() {
		line := scan.Bytes()
		if isFlush(line) {
			sawFlush = true
			continue
		}
		if err := s.decodeCommandStatus(line); err != nil {
			return err
		}
	}
	if !sawFlush {
		return fmt.Errorf("missing flush")
	}
	return scan.Err()
}
// scanFirstLine advances the scanner to the first pkt-line. It returns the
// scanner's error if there is one, or io.ErrUnexpectedEOF when the stream
// ends before any line is read.
func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error {
	if !scan.Scan() {
		if err := scan.Err(); err != nil {
			return err
		}
		return io.ErrUnexpectedEOF
	}
	return nil
}
// decodeReportStatus parses the "unpack <result>" line and stores the
// result in UnpackStatus.
func (s *ReportStatus) decodeReportStatus(b []byte) error {
	// A flush-pkt here means the server sent no unpack status at all.
	if isFlush(b) {
		return fmt.Errorf("premature flush")
	}
	line := string(bytes.TrimSuffix(b, eol))
	parts := strings.SplitN(line, " ", 2)
	if len(parts) != 2 || parts[0] != "unpack" {
		return fmt.Errorf("malformed unpack status: %s", line)
	}
	s.UnpackStatus = parts[1]
	return nil
}
// decodeCommandStatus parses an "ok <refname>" or "ng <refname> <error>"
// line and appends the resulting CommandStatus.
func (s *ReportStatus) decodeCommandStatus(b []byte) error {
	line := string(bytes.TrimSuffix(b, eol))
	fields := strings.SplitN(line, " ", 3)
	var status string
	switch {
	case len(fields) == 3 && fields[0] == "ng":
		// Failed update: the third field carries the error message.
		status = fields[2]
	case len(fields) == 2 && fields[0] == "ok":
		status = ok
	default:
		return fmt.Errorf("malformed command status: %s", line)
	}
	s.CommandStatuses = append(s.CommandStatuses, &CommandStatus{
		ReferenceName: plumbing.ReferenceName(fields[1]),
		Status:        status,
	})
	return nil
}
// CommandStatus is the status of a reference in a report status.
// See ReportStatus struct.
type CommandStatus struct {
	// ReferenceName is the reference the status refers to.
	ReferenceName plumbing.ReferenceName
	// Status is "ok" on success, or an error message otherwise.
	Status string
}
// Error returns the error, if any.
func (s *CommandStatus) Error() error {
	if s.Status != ok {
		return fmt.Errorf("command error on %s: %s",
			s.ReferenceName.String(), s.Status)
	}
	return nil
}
// encode writes the command status to a writer as a single "ok" or "ng"
// pkt-line.
func (s *CommandStatus) encode(w io.Writer) error {
	e := pktline.NewEncoder(w)
	if s.Error() != nil {
		return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status)
	}
	return e.Encodef("ok %s\n", s.ReferenceName.String())
}

View File

@ -0,0 +1,92 @@
package packp
import (
"bytes"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
const (
	// shallowLineLen is len("shallow ") plus a 40-character hex hash.
	shallowLineLen = 48
	// unshallowLineLen is len("unshallow ") plus a 40-character hex hash.
	unshallowLineLen = 50
)
// ShallowUpdate is a shallow-update message: the shallow and unshallow
// object ids sent by the server after a depth request.
type ShallowUpdate struct {
	// Shallows are the hashes from "shallow" lines.
	Shallows []plumbing.Hash
	// Unshallows are the hashes from "unshallow" lines.
	Unshallows []plumbing.Hash
}
// Decode reads a shallow-update message from reader, collecting the
// shallow and unshallow hashes. It stops at the flush-pkt; lines with any
// other prefix are ignored.
func (r *ShallowUpdate) Decode(reader io.Reader) error {
	scan := pktline.NewScanner(reader)
	for scan.Scan() {
		payload := bytes.TrimSpace(scan.Bytes())
		switch {
		case bytes.Equal(payload, pktline.Flush):
			return nil
		case bytes.HasPrefix(payload, shallow):
			if err := r.decodeShallowLine(payload); err != nil {
				return err
			}
		case bytes.HasPrefix(payload, unshallow):
			if err := r.decodeUnshallowLine(payload); err != nil {
				return err
			}
		}
	}
	return scan.Err()
}
// decodeShallowLine parses a "shallow <hash>" line and records the hash.
func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
	h, err := r.decodeLine(line, shallow, shallowLineLen)
	if err == nil {
		r.Shallows = append(r.Shallows, h)
	}
	return err
}
// decodeUnshallowLine parses an "unshallow <hash>" line and records the
// hash.
func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
	h, err := r.decodeLine(line, unshallow, unshallowLineLen)
	if err == nil {
		r.Unshallows = append(r.Unshallows, h)
	}
	return err
}
// decodeLine validates the line length against expLen and extracts the
// trailing 40-character hex hash.
func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
	if len(line) != expLen {
		return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
	}
	hexDigits := line[expLen-40:]
	return plumbing.NewHash(string(hexDigits)), nil
}
// Encode writes r to w as pkt-lines, one per hash, terminated by a
// flush-pkt.
func (r *ShallowUpdate) Encode(w io.Writer) error {
	e := pktline.NewEncoder(w)

	write := func(prefix []byte, hashes []plumbing.Hash) error {
		for _, h := range hashes {
			if err := e.Encodef("%s%s\n", prefix, h.String()); err != nil {
				return err
			}
		}
		return nil
	}

	if err := write(shallow, r.Shallows); err != nil {
		return err
	}

	if err := write(unshallow, r.Unshallows); err != nil {
		return err
	}

	return e.Flush()
}

View File

@ -0,0 +1,33 @@
package sideband
// Type sideband type "side-band" or "side-band-64k"
type Type int8
const (
// Sideband legacy sideband type up to 1000-byte messages
Sideband Type = iota
// Sideband64k sideband type up to 65519-byte messages
Sideband64k Type = iota
// MaxPackedSize for Sideband type
MaxPackedSize = 1000
// MaxPackedSize64k for Sideband64k type
MaxPackedSize64k = 65520
)
// Channel identifies one of the multiplexed sideband streams.
type Channel byte

// WithPayload returns a new message consisting of the channel's stream
// code followed by payload.
func (ch Channel) WithPayload(payload []byte) []byte {
	msg := make([]byte, 0, len(payload)+1)
	msg = append(msg, byte(ch))

	return append(msg, payload...)
}
// The three channels defined by the git sideband protocol; the channel
// code is the first byte of every multiplexed message.
const (
	// PackData packfile content
	PackData Channel = 1
	// ProgressMessage progress messages
	ProgressMessage Channel = 2
	// ErrorMessage fatal error message just before stream aborts
	ErrorMessage Channel = 3
)

View File

@ -0,0 +1,148 @@
package sideband
import (
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded
var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")

// Progress where the progress information is stored
type Progress interface {
	io.Writer
}

// Demuxer demultiplexes the progress reports and error info interleaved with the
// packfile itself.
//
// A sideband has three different channels the main one, called PackData, contains
// the packfile data; the ErrorMessage channel, that contains server errors; and
// the last one, ProgressMessage channel, containing information about the ongoing
// task happening in the server (optional, can be suppressed sending NoProgress
// or Quiet capabilities to the server)
//
// In order to demultiplex the data stream, method `Read` should be called to
// retrieve the PackData channel, the incoming data from the ProgressMessage is
// written at `Progress` (if any), if any message is retrieved from the
// ErrorMessage channel an error is returned and we can assume that the
// connection has been closed.
type Demuxer struct {
	t Type             // sideband flavour, determines the size limit
	r io.Reader        // underlying stream
	s *pktline.Scanner // pkt-line scanner over r

	max     int    // maximum accepted message size, derived from t
	pending []byte // PackData bytes left over from a previous message

	// Progress is where the progress messages are stored
	Progress Progress
}
// NewDemuxer returns a new Demuxer reading from r, using the message size
// limit implied by t (MaxPackedSize for Sideband, MaxPackedSize64k
// otherwise).
func NewDemuxer(t Type, r io.Reader) *Demuxer {
	limit := MaxPackedSize64k
	if t == Sideband {
		limit = MaxPackedSize
	}

	return &Demuxer{
		t:   t,
		r:   r,
		max: limit,
		s:   pktline.NewScanner(r),
	}
}
// Read reads up to len(b) bytes from the PackData channel into b. It can
// return an error if reading fails or if a message arrives on the
// ErrorMessage channel.
//
// Data read from the ProgressMessage channel is not copied to b; it is
// written to d.Progress instead (when set).
func (d *Demuxer) Read(b []byte) (int, error) {
	var total int
	for total < len(b) {
		n, err := d.doRead(b[total:])
		total += n
		if err != nil {
			return total, err
		}
	}

	return total, nil
}
// doRead copies one message's worth of PackData into b, buffering any
// excess in d.pending for the next call.
func (d *Demuxer) doRead(b []byte) (int, error) {
	data, err := d.nextPackData()
	if len(data) > len(b) {
		// Keep the tail of the message for a later read.
		d.pending = data[len(b):]
		data = data[:len(b)]
	}

	return copy(b, data), err
}
// nextPackData returns the payload of the next PackData message. Progress
// messages encountered on the way are written to d.Progress (when set) and
// yield (nil, nil); an ErrorMessage is turned into an error.
func (d *Demuxer) nextPackData() ([]byte, error) {
	// Serve bytes buffered from a previous oversized message first.
	content := d.getPending()
	if len(content) != 0 {
		return content, nil
	}

	if !d.s.Scan() {
		if err := d.s.Err(); err != nil {
			return nil, err
		}
		return nil, io.EOF
	}

	content = d.s.Bytes()

	size := len(content)
	if size == 0 {
		// Empty payload: nothing to deliver, caller loops again.
		return nil, nil
	} else if size > d.max {
		return nil, ErrMaxPackedExceeded
	}

	// First byte selects the channel; the remainder is the payload.
	switch Channel(content[0]) {
	case PackData:
		return content[1:], nil
	case ProgressMessage:
		if d.Progress != nil {
			_, err := d.Progress.Write(content[1:])
			return nil, err
		}
	case ErrorMessage:
		return nil, fmt.Errorf("unexpected error: %s", content[1:])
	default:
		return nil, fmt.Errorf("unknown channel %s", content)
	}

	return nil, nil
}
// getPending returns and clears any bytes buffered by a previous read, or
// nil when nothing is pending.
func (d *Demuxer) getPending() []byte {
	p := d.pending
	if len(p) == 0 {
		return nil
	}

	d.pending = nil

	return p
}

View File

@ -0,0 +1,31 @@
// Package sideband implements a sideband multiplexer/demultiplexer
package sideband
// If 'side-band' or 'side-band-64k' capabilities have been specified by
// the client, the server will send the packfile data multiplexed.
//
// Either mode indicates that the packfile data will be streamed broken
// up into packets of up to either 1000 bytes in the case of 'side_band',
// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
// of a leading 4-byte pkt-line length of how much data is in the packet,
// followed by a 1-byte stream code, followed by the actual data.
//
// The stream code can be one of:
//
// 1 - pack data
// 2 - progress messages
// 3 - fatal error message just before stream aborts
//
// The "side-band-64k" capability came about as a way for newer clients
// that can handle much larger packets to request packets that are
// actually crammed nearly full, while maintaining backward compatibility
// for the older clients.
//
// Further, with side-band and its up to 1000-byte messages, it's actually
// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
// same deal, you have up to 65519 bytes of data and 1 byte for the stream
// code.
//
// The client MUST send only maximum of one of "side-band" and "side-
// band-64k". Server MUST diagnose it as an error if client requests
// both.

View File

@ -0,0 +1,65 @@
package sideband
import (
"io"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Muxer multiplexes the packfile along with the progress messages and the
// error information. The multiplexing is performed using pktline format.
type Muxer struct {
	max int              // maximum payload bytes per message (limit minus channel byte)
	e   *pktline.Encoder // destination encoder
}

// chLen is the length of the channel-code prefix of every message.
const chLen = 1
// NewMuxer returns a new Muxer for the given t that writes on w.
//
// When t is Sideband the maximum message size is MaxPackedSize; for any
// other value it is MaxPackedSize64k, the maximum length of a pktline
// payload.
func NewMuxer(t Type, w io.Writer) *Muxer {
	limit := MaxPackedSize64k
	if t == Sideband {
		limit = MaxPackedSize
	}

	return &Muxer{
		max: limit - chLen,
		e:   pktline.NewEncoder(w),
	}
}
// Write writes data to the PackData channel.
func (m *Muxer) Write(data []byte) (int, error) {
	return m.WriteChannel(PackData, data)
}
// WriteChannel writes p to the given channel, splitting it into as many
// messages as the configured size limit requires. It works with any
// channel, but is recommended only for ProgressMessage and ErrorMessage;
// use Write for PackData.
func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
	var sent int
	for sent < len(p) {
		n, err := m.doWrite(t, p[sent:])
		sent += n
		if err != nil {
			return sent, err
		}
	}

	return sent, nil
}
// doWrite encodes at most m.max bytes of p as a single sideband message on
// channel ch, returning how many payload bytes were consumed.
func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
	chunk := p
	if len(chunk) > m.max {
		chunk = chunk[:m.max]
	}

	return len(chunk), m.e.Encode(ch.WithPayload(chunk))
}

View File

@ -0,0 +1,127 @@
package packp
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// ackLineLen is the minimum length of an "ACK <40-hex-hash>" payload.
const ackLineLen = 44

// ServerResponse object acknowledgement from upload-pack service
type ServerResponse struct {
	// ACKs are the object ids acknowledged by the server.
	ACKs []plumbing.Hash
}
// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
	// TODO: implement support for multi_ack or multi_ack_detailed responses
	if isMultiACK {
		return errors.New("multi_ack and multi_ack_detailed are not supported")
	}

	s := pktline.NewScanner(reader)

	for s.Scan() {
		line := s.Bytes()
		if err := r.decodeLine(line); err != nil {
			return err
		}

		// we need to detect when the end of a response header and the beginning
		// of a packfile header happened, some requests to the git daemon
		// produces a duplicate ACK header even when multi_ack is not supported.
		stop, err := r.stopReading(reader)
		if err != nil {
			return err
		}

		if stop {
			break
		}
	}

	return s.Err()
}
// stopReading detects when a valid command such as ACK or NAK is found to be
// read in the buffer without moving the read pointer.
func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
	// Peek does not advance the reader. Fewer than 7 buffered bytes at
	// EOF means no further response line can follow.
	ahead, err := reader.Peek(7)
	if err == io.EOF {
		return true, nil
	}

	if err != nil {
		return false, err
	}

	// Command at the start of the buffered data.
	if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
		return false, nil
	}

	// Command at offset 4 — presumably preceded by a 4-byte pkt-line
	// length header; NOTE(review): confirm why the first case has no header.
	if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
		return false, nil
	}

	return true, nil
}
// isValidCommand reports whether b is one of the known response commands
// (ACK or NAK).
func (r *ServerResponse) isValidCommand(b []byte) bool {
	return bytes.Equal(b, ack) || bytes.Equal(b, nak)
}
// decodeLine parses one response payload: an ACK line is decoded into
// r.ACKs, a NAK is accepted silently, anything else is an error.
func (r *ServerResponse) decodeLine(line []byte) error {
	if len(line) == 0 {
		return fmt.Errorf("unexpected flush")
	}

	// Guard the 3-byte command prefix explicitly: slicing line[0:3] on a
	// shorter payload would panic (or silently read stale bytes beyond
	// len when the backing array is larger).
	if len(line) >= 3 {
		if bytes.Equal(line[0:3], ack) {
			return r.decodeACKLine(line)
		}

		if bytes.Equal(line[0:3], nak) {
			return nil
		}
	}

	return fmt.Errorf("unexpected content %q", string(line))
}
// decodeACKLine parses an "ACK <hash>" payload and appends the hash to
// r.ACKs.
func (r *ServerResponse) decodeACKLine(line []byte) error {
	if len(line) < ackLineLen {
		return fmt.Errorf("malformed ACK %q", line)
	}

	// The 40-character object id starts right after the first space.
	sp := bytes.IndexByte(line, ' ')
	r.ACKs = append(r.ACKs, plumbing.NewHash(string(line[sp+1:sp+41])))

	return nil
}
// Encode writes the response to w: a single ACK line when exactly one hash
// was acknowledged, a NAK when none. Multiple ACKs (multi_ack) are not
// supported.
func (r *ServerResponse) Encode(w io.Writer) error {
	if len(r.ACKs) > 1 {
		return errors.New("multi_ack and multi_ack_detailed are not supported")
	}

	e := pktline.NewEncoder(w)
	if len(r.ACKs) == 1 {
		return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
	}

	return e.Encodef("%s\n", nak)
}

View File

@ -0,0 +1,168 @@
package packp
import (
"fmt"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// UploadRequest values represent the information transmitted on a
// upload-request message. Values from this type are not zero-value
// safe, use the New function instead.
// This is a low level type, use UploadPackRequest instead.
type UploadRequest struct {
	Capabilities *capability.List // requested protocol capabilities
	Wants        []plumbing.Hash  // objects the client wants
	Shallows     []plumbing.Hash  // objects the client has only shallowly
	Depth        Depth            // requested history depth
}

// Depth values stores the desired depth of the requested packfile: see
// DepthCommits, DepthSince and DepthReference.
type Depth interface {
	// isDepth is an unexported marker; only this package can implement Depth.
	isDepth()
	IsZero() bool
}
// DepthCommits values stores the maximum number of requested commits in
// the packfile. Zero means infinite. A negative value will have
// undefined consequences.
type DepthCommits int
func (d DepthCommits) isDepth() {}
func (d DepthCommits) IsZero() bool {
return d == 0
}
// DepthSince values requests only commits newer than the specified time.
type DepthSince time.Time
func (d DepthSince) isDepth() {}
func (d DepthSince) IsZero() bool {
return time.Time(d).IsZero()
}
// DepthReference requests only commits not to found in the specified reference.
type DepthReference string
func (d DepthReference) isDepth() {}
func (d DepthReference) IsZero() bool {
return string(d) == ""
}
// NewUploadRequest returns a pointer to a new UploadRequest value, ready
// to be used. It has no capabilities, wants or shallows and an infinite
// depth. Note that to encode an upload-request it has to have at least
// one wanted hash.
func NewUploadRequest() *UploadRequest {
	r := &UploadRequest{
		Capabilities: capability.NewList(),
		Depth:        DepthCommits(0),
	}
	r.Wants = []plumbing.Hash{}
	r.Shallows = []plumbing.Hash{}

	return r
}
// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
// value, the request capabilities are filled with the most optimal ones, based
// on the adv value (advertised capabilities), the UploadRequest generated it
// has no wants or shallows and an infinite depth.
func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
	r := NewUploadRequest()

	// Prefer the more detailed/larger variant when the server offers both.
	if adv.Supports(capability.MultiACKDetailed) {
		r.Capabilities.Set(capability.MultiACKDetailed)
	} else if adv.Supports(capability.MultiACK) {
		r.Capabilities.Set(capability.MultiACK)
	}

	if adv.Supports(capability.Sideband64k) {
		r.Capabilities.Set(capability.Sideband64k)
	} else if adv.Supports(capability.Sideband) {
		r.Capabilities.Set(capability.Sideband)
	}

	if adv.Supports(capability.ThinPack) {
		r.Capabilities.Set(capability.ThinPack)
	}

	if adv.Supports(capability.OFSDelta) {
		r.Capabilities.Set(capability.OFSDelta)
	}

	if adv.Supports(capability.Agent) {
		r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
	}

	return r
}
// Validate validates the content of UploadRequest, following the next rules:
//   - Wants MUST have at least one reference
//   - capability.Shallow MUST be present if Shallows is not empty
//   - if a non-zero DepthCommits is given capability.Shallow MUST be present
//   - if a DepthSince is given capability.DeepenSince MUST be present
//   - if a DepthReference is given capability.DeepenNot MUST be present
//   - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k
//   - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed
func (r *UploadRequest) Validate() error {
	if len(r.Wants) == 0 {
		return fmt.Errorf("want can't be empty")
	}

	if err := r.validateRequiredCapabilities(); err != nil {
		return err
	}

	if err := r.validateConflictCapabilities(); err != nil {
		return err
	}

	return nil
}
// validateRequiredCapabilities checks that the capabilities implied by the
// request's shallows and depth are actually present in the request.
func (r *UploadRequest) validateRequiredCapabilities() error {
	msg := "missing capability %s"

	if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) {
		return fmt.Errorf(msg, capability.Shallow)
	}

	switch depth := r.Depth.(type) {
	case DepthCommits:
		// A zero commit depth means "infinite" and needs no capability.
		if depth != 0 && !r.Capabilities.Supports(capability.Shallow) {
			return fmt.Errorf(msg, capability.Shallow)
		}
	case DepthSince:
		if !r.Capabilities.Supports(capability.DeepenSince) {
			return fmt.Errorf(msg, capability.DeepenSince)
		}
	case DepthReference:
		if !r.Capabilities.Supports(capability.DeepenNot) {
			return fmt.Errorf(msg, capability.DeepenNot)
		}
	}

	return nil
}
// validateConflictCapabilities rejects requests that set both members of a
// mutually exclusive capability pair.
func (r *UploadRequest) validateConflictCapabilities() error {
	const msg = "capabilities %s and %s are mutually exclusive"

	supports := r.Capabilities.Supports
	if supports(capability.Sideband) && supports(capability.Sideband64k) {
		return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
	}

	if supports(capability.MultiACK) && supports(capability.MultiACKDetailed) {
		return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
	}

	return nil
}

View File

@ -0,0 +1,257 @@
package packp
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"strconv"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Decode reads the next upload-request from its input and stores it in
// the UploadRequest.
func (u *UploadRequest) Decode(r io.Reader) error {
	return newUlReqDecoder(r).Decode(u)
}
// ulReqDecoder is a state-machine decoder for upload-request messages.
type ulReqDecoder struct {
	s     *pktline.Scanner // a pkt-line scanner from the input stream
	line  []byte           // current pkt-line contents, use parser.nextLine() to make it advance
	nLine int              // current pkt-line number for debugging, begins at 1
	err   error            // sticky error, use the parser.error() method to fill this out
	data  *UploadRequest   // parsed data is stored here
}

// newUlReqDecoder returns a decoder reading pkt-lines from r.
func newUlReqDecoder(r io.Reader) *ulReqDecoder {
	return &ulReqDecoder{
		s: pktline.NewScanner(r),
	}
}
// Decode runs the decoder state machine over the input, filling v, and
// returns the sticky error (nil on success).
func (d *ulReqDecoder) Decode(v *UploadRequest) error {
	d.data = v

	state := d.decodeFirstWant
	for state != nil {
		state = state()
	}

	return d.err
}
// error fills out the parser sticky error, prefixing the message with the
// current pkt-line number for easier debugging.
func (d *ulReqDecoder) error(format string, a ...interface{}) {
	msg := fmt.Sprintf(
		"pkt-line %d: %s", d.nLine,
		fmt.Sprintf(format, a...),
	)

	d.err = NewErrUnexpectedData(msg, d.line)
}
// nextLine reads a new pkt-line from the scanner, makes its payload
// available as d.line and increments d.nLine. A successful invocation
// returns true; otherwise, false is returned and the sticky error is
// filled out accordingly. Trims eols at the end of the payloads.
func (d *ulReqDecoder) nextLine() bool {
	d.nLine++

	if !d.s.Scan() {
		if d.err = d.s.Err(); d.err != nil {
			return false
		}

		// A failed scan without a scanner error means the input ended.
		d.error("EOF")
		return false
	}

	d.line = d.s.Bytes()
	d.line = bytes.TrimSuffix(d.line, eol)

	return true
}
// decodeFirstWant parses the mandatory first line.
// Expected format: want <hash>[ capabilities]
func (d *ulReqDecoder) decodeFirstWant() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if !bytes.HasPrefix(d.line, want) {
		d.error("missing 'want ' prefix")
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, want)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Wants = append(d.data.Wants, hash)

	return d.decodeCaps
}

// readHash consumes hashSize bytes from the front of d.line and decodes
// them as a hex object id. On failure it fills the sticky error and
// returns false.
func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
	if len(d.line) < hashSize {
		d.err = fmt.Errorf("malformed hash: %v", d.line)
		return plumbing.ZeroHash, false
	}

	var hash plumbing.Hash
	if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
		d.error("invalid hash text: %s", err)
		return plumbing.ZeroHash, false
	}
	// Advance past the hash so the caller can keep parsing the line.
	d.line = d.line[hashSize:]

	return hash, true
}
// decodeCaps parses the capability list that may follow the first want.
// Expected format: sp cap1 sp cap2 sp cap3...
func (d *ulReqDecoder) decodeCaps() stateFn {
	d.line = bytes.TrimPrefix(d.line, sp)
	if err := d.data.Capabilities.Decode(d.line); err != nil {
		d.error("invalid capabilities: %s", err)
	}

	return d.decodeOtherWants
}

// decodeOtherWants parses any additional want lines, dispatching to the
// shallow/deepen states when those prefixes appear. An empty line
// (flush-pkt) ends the request.
// Expected format: want <hash>
func (d *ulReqDecoder) decodeOtherWants() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if bytes.HasPrefix(d.line, shallow) {
		return d.decodeShallow
	}

	if bytes.HasPrefix(d.line, deepen) {
		return d.decodeDeepen
	}

	if len(d.line) == 0 {
		return nil
	}

	if !bytes.HasPrefix(d.line, want) {
		d.error("unexpected payload while expecting a want: %q", d.line)
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, want)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Wants = append(d.data.Wants, hash)

	return d.decodeOtherWants
}
// decodeShallow parses consecutive shallow lines; the current line was
// already read by the previous state.
// Expected format: shallow <hash>
func (d *ulReqDecoder) decodeShallow() stateFn {
	if bytes.HasPrefix(d.line, deepen) {
		return d.decodeDeepen
	}

	if len(d.line) == 0 {
		return nil
	}

	if !bytes.HasPrefix(d.line, shallow) {
		d.error("unexpected payload while expecting a shallow: %q", d.line)
		return nil
	}
	d.line = bytes.TrimPrefix(d.line, shallow)

	hash, ok := d.readHash()
	if !ok {
		return nil
	}
	d.data.Shallows = append(d.data.Shallows, hash)

	// Read the next line before looping so deepen/flush can be detected.
	if ok := d.nextLine(); !ok {
		return nil
	}

	return d.decodeShallow
}

// decodeDeepen dispatches on the deepen variant found on the current line.
// Expected format: deepen <n> / deepen-since <ul> / deepen-not <ref>
func (d *ulReqDecoder) decodeDeepen() stateFn {
	if bytes.HasPrefix(d.line, deepenCommits) {
		return d.decodeDeepenCommits
	}

	if bytes.HasPrefix(d.line, deepenSince) {
		return d.decodeDeepenSince
	}

	if bytes.HasPrefix(d.line, deepenReference) {
		return d.decodeDeepenReference
	}

	if len(d.line) == 0 {
		return nil
	}

	d.error("unexpected deepen specification: %q", d.line)
	return nil
}
// decodeDeepenCommits parses "deepen <n>" into a DepthCommits value;
// negative depths are rejected.
func (d *ulReqDecoder) decodeDeepenCommits() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenCommits)

	var n int
	if n, d.err = strconv.Atoi(string(d.line)); d.err != nil {
		return nil
	}
	if n < 0 {
		d.err = fmt.Errorf("negative depth")
		return nil
	}
	d.data.Depth = DepthCommits(n)

	return d.decodeFlush
}

// decodeDeepenSince parses "deepen-since <unix-seconds>" into a DepthSince
// value (normalized to UTC).
func (d *ulReqDecoder) decodeDeepenSince() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenSince)

	var secs int64
	secs, d.err = strconv.ParseInt(string(d.line), 10, 64)
	if d.err != nil {
		return nil
	}
	t := time.Unix(secs, 0).UTC()
	d.data.Depth = DepthSince(t)

	return d.decodeFlush
}

// decodeDeepenReference parses "deepen-not <ref>" into a DepthReference.
func (d *ulReqDecoder) decodeDeepenReference() stateFn {
	d.line = bytes.TrimPrefix(d.line, deepenReference)

	d.data.Depth = DepthReference(string(d.line))

	return d.decodeFlush
}

// decodeFlush expects the terminating flush-pkt (an empty payload).
func (d *ulReqDecoder) decodeFlush() stateFn {
	if ok := d.nextLine(); !ok {
		return nil
	}

	if len(d.line) != 0 {
		d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line)
	}

	return nil
}

View File

@ -0,0 +1,145 @@
package packp
import (
"bytes"
"fmt"
"io"
"time"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
// Encode writes the UlReq encoding of u to the stream.
//
// All the payloads will end with a newline character. Wants and shallows
// are sorted alphabetically. A depth of 0 means no depth request is sent.
func (u *UploadRequest) Encode(w io.Writer) error {
	return newUlReqEncoder(w).Encode(u)
}
// ulReqEncoder is a state-machine encoder for upload-request messages.
type ulReqEncoder struct {
	pe   *pktline.Encoder // where to write the encoded data
	data *UploadRequest   // the data to encode
	err  error            // sticky error
}

// newUlReqEncoder returns an encoder that writes pkt-lines to w.
func newUlReqEncoder(w io.Writer) *ulReqEncoder {
	return &ulReqEncoder{
		pe: pktline.NewEncoder(w),
	}
}
// Encode checks that v has at least one want, sorts the wants and runs the
// encoder state machine, returning the first error encountered.
func (e *ulReqEncoder) Encode(v *UploadRequest) error {
	if len(v.Wants) == 0 {
		return fmt.Errorf("empty wants provided")
	}

	e.data = v
	plumbing.HashesSort(e.data.Wants)

	state := e.encodeFirstWant
	for state != nil {
		state = state()
	}

	return e.err
}
// encodeFirstWant writes the first want line; the capability list, when
// non-empty, rides on this same line per the protocol.
func (e *ulReqEncoder) encodeFirstWant() stateFn {
	var err error
	if e.data.Capabilities.IsEmpty() {
		err = e.pe.Encodef("want %s\n", e.data.Wants[0])
	} else {
		err = e.pe.Encodef(
			"want %s %s\n",
			e.data.Wants[0],
			e.data.Capabilities.String(),
		)
	}
	if err != nil {
		e.err = fmt.Errorf("encoding first want line: %s", err)
		return nil
	}

	return e.encodeAditionalWants
}
// encodeAditionalWants writes the remaining wants, skipping consecutive
// duplicates (Encode sorted the wants, so duplicates are adjacent).
func (e *ulReqEncoder) encodeAditionalWants() stateFn {
	last := e.data.Wants[0]
	for _, w := range e.data.Wants[1:] {
		if bytes.Equal(last[:], w[:]) {
			continue
		}

		if err := e.pe.Encodef("want %s\n", w); err != nil {
			e.err = fmt.Errorf("encoding want %q: %s", w, err)
			return nil
		}

		last = w
	}

	return e.encodeShallows
}
// encodeShallows writes the shallow lines, sorted and deduplicated the
// same way as the wants.
func (e *ulReqEncoder) encodeShallows() stateFn {
	plumbing.HashesSort(e.data.Shallows)

	var last plumbing.Hash
	for _, s := range e.data.Shallows {
		if bytes.Equal(last[:], s[:]) {
			continue
		}

		if err := e.pe.Encodef("shallow %s\n", s); err != nil {
			e.err = fmt.Errorf("encoding shallow %q: %s", s, err)
			return nil
		}

		last = s
	}

	return e.encodeDepth
}
// encodeDepth writes the deepen line matching the request's Depth variant;
// a zero DepthCommits means "infinite" and produces no line.
func (e *ulReqEncoder) encodeDepth() stateFn {
	switch depth := e.data.Depth.(type) {
	case DepthCommits:
		if depth != 0 {
			commits := int(depth)
			if err := e.pe.Encodef("deepen %d\n", commits); err != nil {
				e.err = fmt.Errorf("encoding depth %d: %s", depth, err)
				return nil
			}
		}
	case DepthSince:
		when := time.Time(depth).UTC()
		if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil {
			e.err = fmt.Errorf("encoding depth %s: %s", when, err)
			return nil
		}
	case DepthReference:
		reference := string(depth)
		if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil {
			e.err = fmt.Errorf("encoding depth %s: %s", reference, err)
			return nil
		}
	default:
		e.err = fmt.Errorf("unsupported depth type")
		return nil
	}

	return e.encodeFlush
}
// encodeFlush terminates the request with a flush-pkt.
func (e *ulReqEncoder) encodeFlush() stateFn {
	if err := e.pe.Flush(); err != nil {
		e.err = fmt.Errorf("encoding flush-pkt: %s", err)
	}

	return nil
}

View File

@ -0,0 +1,122 @@
package packp
import (
"errors"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband"
)
var (
	// ErrEmptyCommands is returned when a request carries no commands.
	ErrEmptyCommands = errors.New("commands cannot be empty")
	// ErrMalformedCommand is returned for commands whose action is invalid.
	ErrMalformedCommand = errors.New("malformed command")
)
// ReferenceUpdateRequest values represent reference upload requests.
// Values from this type are not zero-value safe, use the New function instead.
type ReferenceUpdateRequest struct {
	Capabilities *capability.List
	Commands     []*Command
	// Shallow, when set, is the object id sent on a leading shallow line.
	Shallow *plumbing.Hash
	// Packfile contains an optional packfile reader.
	Packfile io.ReadCloser

	// Progress receives sideband progress messages from the server
	Progress sideband.Progress
}
// NewReferenceUpdateRequest returns a pointer to a new
// ReferenceUpdateRequest value with an empty capability list and no
// commands.
func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
	return &ReferenceUpdateRequest{
		// TODO: Add support for push-cert
		Capabilities: capability.NewList(),
		Commands:     nil,
	}
}
// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new
// ReferenceUpdateRequest value, the request capabilities are filled with the
// most optimal ones, based on the adv value (advertised capabilities), the
// ReferenceUpdateRequest contains no commands
//
// It does set the following capabilities (when advertised):
//   - agent
//   - report-status
// It leaves up to the user to add the following capabilities later:
//   - atomic
//   - ofs-delta
//   - side-band
//   - side-band-64k
//   - quiet
//   - push-cert
func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest {
	r := NewReferenceUpdateRequest()

	if adv.Supports(capability.Agent) {
		r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
	}

	if adv.Supports(capability.ReportStatus) {
		r.Capabilities.Set(capability.ReportStatus)
	}

	return r
}
// validate checks that the request has at least one command and that every
// command is well formed.
func (r *ReferenceUpdateRequest) validate() error {
	if len(r.Commands) == 0 {
		return ErrEmptyCommands
	}

	for _, cmd := range r.Commands {
		if err := cmd.validate(); err != nil {
			return err
		}
	}

	return nil
}
// Action describes the effect a Command has on a reference.
type Action string

// The possible command actions; Invalid marks a command whose old and new
// hashes are both zero. All constants are explicitly typed as Action for
// consistency (the original typed only Create).
const (
	Create  Action = "create"
	Update  Action = "update"
	Delete  Action = "delete"
	Invalid Action = "invalid"
)
// Command describes one requested reference change: update Name from the
// Old hash to the New hash (a zero hash marks creation or deletion).
type Command struct {
	Name plumbing.ReferenceName
	Old  plumbing.Hash
	New  plumbing.Hash
}
// Action classifies the command: Create when only Old is zero, Delete when
// only New is zero, Update when neither is, and Invalid when both are.
func (c *Command) Action() Action {
	oldIsZero := c.Old == plumbing.ZeroHash
	newIsZero := c.New == plumbing.ZeroHash

	switch {
	case oldIsZero && newIsZero:
		return Invalid
	case oldIsZero:
		return Create
	case newIsZero:
		return Delete
	default:
		return Update
	}
}
// validate returns ErrMalformedCommand when the command's action cannot be
// determined.
func (c *Command) validate() error {
	if c.Action() != Invalid {
		return nil
	}

	return ErrMalformedCommand
}

View File

@ -0,0 +1,250 @@
package packp
import (
"bytes"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
)
var (
	// shallowLineLength is len("shallow ") plus the hex hash size.
	shallowLineLength = len(shallow) + hashSize
	// minCommandLength is two hashes, two separators and at least one
	// character of reference name.
	minCommandLength = hashSize*2 + 2 + 1
	// minCommandAndCapsLenth additionally requires the NUL delimiter.
	minCommandAndCapsLenth = minCommandLength + 1
)

var (
	// ErrEmpty is returned when the update-request message is empty.
	ErrEmpty = errors.New("empty update-request message")

	errNoCommands                   = errors.New("unexpected EOF before any command")
	errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found")
)
// errMalformedRequest and the helpers below build the decoder's
// descriptive error values; they all wrap the same "malformed request"
// prefix (except the raw hash errors).
func errMalformedRequest(reason string) error {
	return fmt.Errorf("malformed request: %s", reason)
}

func errInvalidHashSize(got int) error {
	return fmt.Errorf("invalid hash size: expected %d, got %d",
		hashSize, got)
}

func errInvalidHash(err error) error {
	return fmt.Errorf("invalid hash: %s", err.Error())
}

func errInvalidShallowLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid shallow line length: expected %d, got %d",
		shallowLineLength, got))
}

func errInvalidCommandCapabilitiesLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid command and capabilities line length: expected at least %d, got %d",
		minCommandAndCapsLenth, got))
}

func errInvalidCommandLineLength(got int) error {
	return errMalformedRequest(fmt.Sprintf(
		"invalid command line length: expected at least %d, got %d",
		minCommandLength, got))
}

func errInvalidShallowObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid shallow object id: %s", err.Error()))
}

func errInvalidOldObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid old object id: %s", err.Error()))
}

func errInvalidNewObjId(err error) error {
	return errMalformedRequest(
		fmt.Sprintf("invalid new object id: %s", err.Error()))
}

func errMalformedCommand(err error) error {
	return errMalformedRequest(fmt.Sprintf(
		"malformed command: %s", err.Error()))
}
// Decode reads the next update-request message from r and stores it in
// req. The reader is retained (wrapped in a NopCloser when it is not
// already an io.ReadCloser) and exposed as the request's Packfile.
func (req *ReferenceUpdateRequest) Decode(r io.Reader) error {
	var rc io.ReadCloser
	var ok bool
	rc, ok = r.(io.ReadCloser)
	if !ok {
		rc = ioutil.NopCloser(r)
	}

	d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)}
	return d.Decode(req)
}
// updReqDecoder parses reference-update requests from a pkt-line stream.
type updReqDecoder struct {
	r   io.ReadCloser           // original input; becomes the request's Packfile
	s   *pktline.Scanner        // pkt-line scanner over r
	req *ReferenceUpdateRequest // destination of the parsed data
}

// Decode runs each decoding step in order, stopping at the first error.
func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error {
	d.req = req
	funcs := []func() error{
		d.scanLine,
		d.decodeShallow,
		d.decodeCommandAndCapabilities,
		d.decodeCommands,
		d.setPackfile,
		req.validate,
	}

	for _, f := range funcs {
		if err := f(); err != nil {
			return err
		}
	}

	return nil
}
// scanLine advances the scanner one pkt-line, translating a failed scan
// into the scanner's error or ErrEmpty.
func (d *updReqDecoder) scanLine() error {
	if d.s.Scan() {
		return nil
	}

	return d.scanErrorOr(ErrEmpty)
}
// decodeShallow parses an optional leading "shallow<hash>" line; when
// present it also advances the scanner so the next step sees a command.
func (d *updReqDecoder) decodeShallow() error {
	b := d.s.Bytes()

	if !bytes.HasPrefix(b, shallowNoSp) {
		// No shallow line: leave the current line for the next step.
		return nil
	}

	if len(b) != shallowLineLength {
		return errInvalidShallowLineLength(len(b))
	}

	h, err := parseHash(string(b[len(shallow):]))
	if err != nil {
		return errInvalidShallowObjId(err)
	}

	// A shallow line alone is not a valid request; a command must follow.
	if ok := d.s.Scan(); !ok {
		return d.scanErrorOr(errNoCommands)
	}

	d.req.Shallow = &h

	return nil
}
// decodeCommands parses command lines until a flush-pkt is found; the
// current line was already read by the previous step.
func (d *updReqDecoder) decodeCommands() error {
	for {
		b := d.s.Bytes()
		if bytes.Equal(b, pktline.Flush) {
			return nil
		}

		c, err := parseCommand(b)
		if err != nil {
			return err
		}

		d.req.Commands = append(d.req.Commands, c)

		if ok := d.s.Scan(); !ok {
			return d.s.Err()
		}
	}
}
// decodeCommandAndCapabilities parses the first command line, which also
// carries the capability list after a NUL delimiter. The delimiter is
// required even when the capability list is empty.
func (d *updReqDecoder) decodeCommandAndCapabilities() error {
	b := d.s.Bytes()
	i := bytes.IndexByte(b, 0)
	if i == -1 {
		return errMissingCapabilitiesDelimiter
	}

	if len(b) < minCommandAndCapsLenth {
		return errInvalidCommandCapabilitiesLineLength(len(b))
	}

	cmd, err := parseCommand(b[:i])
	if err != nil {
		return err
	}

	d.req.Commands = append(d.req.Commands, cmd)

	if err := d.req.Capabilities.Decode(b[i+1:]); err != nil {
		return err
	}

	// Advance to the next line for decodeCommands.
	if err := d.scanLine(); err != nil {
		return err
	}

	return nil
}
// setPackfile hands the remaining input to the request as its packfile.
func (d *updReqDecoder) setPackfile() error {
	d.req.Packfile = d.r

	return nil
}
// parseCommand parses "<old-hash> <new-hash> <ref-name>" into a Command.
func parseCommand(b []byte) (*Command, error) {
	if len(b) < minCommandLength {
		return nil, errInvalidCommandLineLength(len(b))
	}

	var (
		oldStr, newStr string
		name           plumbing.ReferenceName
	)
	if _, err := fmt.Sscanf(string(b), "%s %s %s", &oldStr, &newStr, &name); err != nil {
		return nil, errMalformedCommand(err)
	}

	oldHash, err := parseHash(oldStr)
	if err != nil {
		return nil, errInvalidOldObjId(err)
	}

	newHash, err := parseHash(newStr)
	if err != nil {
		return nil, errInvalidNewObjId(err)
	}

	return &Command{Old: oldHash, New: newHash, Name: name}, nil
}
// parseHash checks that s is exactly hashSize valid hex characters and
// converts it to a plumbing.Hash.
func parseHash(s string) (plumbing.Hash, error) {
	if len(s) != hashSize {
		return plumbing.ZeroHash, errInvalidHashSize(len(s))
	}

	if _, err := hex.DecodeString(s); err != nil {
		return plumbing.ZeroHash, errInvalidHash(err)
	}

	return plumbing.NewHash(s), nil
}
// scanErrorOr returns the scanner's error when there is one, falling back
// to origErr otherwise.
func (d *updReqDecoder) scanErrorOr(origErr error) error {
	if scanErr := d.s.Err(); scanErr != nil {
		return scanErr
	}

	return origErr
}

View File

@ -0,0 +1,75 @@
package packp
import (
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
var (
	// zeroHashString is the all-zeros object id in hex form.
	// NOTE(review): not referenced in this file's visible code — confirm
	// it is used elsewhere before removing.
	zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream: an
// optional shallow line, the commands (first one carrying the capability
// list) and, when present, the packfile, which is fully copied and closed.
func (r *ReferenceUpdateRequest) Encode(w io.Writer) error {
	if err := r.validate(); err != nil {
		return err
	}

	e := pktline.NewEncoder(w)

	if err := r.encodeShallow(e, r.Shallow); err != nil {
		return err
	}

	if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil {
		return err
	}

	if r.Packfile != nil {
		if _, err := io.Copy(w, r.Packfile); err != nil {
			return err
		}

		return r.Packfile.Close()
	}

	return nil
}
// encodeShallow writes a "shallow<hash>" pkt-line when a shallow hash is
// set; it is a no-op otherwise.
func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder,
	h *plumbing.Hash) error {
	if h == nil {
		return nil
	}

	return e.Encodef("%s%s", shallow, h.String())
}
// encodeCommands writes the command pkt-lines; the first command carries
// the capability list after a NUL delimiter.
func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder,
	cmds []*Command, caps *capability.List) error {
	if err := e.Encodef("%s\x00%s",
		formatCommand(cmds[0]), caps.String()); err != nil {
		return err
	}

	for _, cmd := range cmds[1:] {
		// Use an explicit %s verb: the formatted command embeds a
		// reference name, and passing it directly as the format string
		// would misinterpret any '%' it contains.
		if err := e.Encodef("%s", formatCommand(cmd)); err != nil {
			return err
		}
	}

	return e.Flush()
}
// formatCommand renders cmd as "<old-hash> <new-hash> <ref-name>".
func formatCommand(cmd *Command) string {
	return fmt.Sprintf("%s %s %s", cmd.Old.String(), cmd.New.String(), cmd.Name)
}

View File

@ -0,0 +1,98 @@
package packp
import (
"bytes"
"fmt"
"io"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
)
// UploadPackRequest represents a upload-pack request.
// Zero-value is not safe, use NewUploadPackRequest instead.
type UploadPackRequest struct {
	UploadRequest // wants, shallows, depth and capabilities
	UploadHaves   // objects the client already has
}

// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer.
func NewUploadPackRequest() *UploadPackRequest {
	ur := NewUploadRequest()
	return &UploadPackRequest{
		UploadHaves:   UploadHaves{},
		UploadRequest: *ur,
	}
}
// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and
// returns a pointer. The request capabilities are filled with the most
// optimal ones based on adv (the advertised capabilities); the returned
// request has no wants, haves or shallows, and an infinite depth.
func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest {
	return &UploadPackRequest{
		UploadRequest: *NewUploadRequestFromCapabilities(adv),
		UploadHaves:   UploadHaves{},
	}
}
// IsEmpty reports whether the request would fetch nothing: every Want is
// already contained in Haves (trivially true when Wants is empty).
func (r *UploadPackRequest) IsEmpty() bool {
	return isSubset(r.Wants, r.Haves)
}
// isSubset reports whether every element of needle appears in haystack.
func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool {
needles:
	for _, want := range needle {
		for _, have := range haystack {
			if want == have {
				continue needles
			}
		}

		return false
	}

	return true
}
// UploadHaves is a message to signal the references that a client has in a
// upload-pack. Do not use this directly. Use UploadPackRequest request instead.
type UploadHaves struct {
	// Haves lists the object ids the client already has; sorted and
	// deduplicated by Encode before being written.
	Haves []plumbing.Hash
}
// Encode encodes the UploadHaves into the Writer as "have <hash>" pkt-lines.
// Haves are sorted in place and consecutive duplicates are skipped. If flush
// is true and there is at least one have, a flush-pkt is encoded at the end.
func (u *UploadHaves) Encode(w io.Writer, flush bool) error {
	e := pktline.NewEncoder(w)

	plumbing.HashesSort(u.Haves)

	var last plumbing.Hash
	for i, have := range u.Haves {
		// Skip duplicates (adjacent after sorting). The i > 0 guard
		// avoids conflating the zero-valued sentinel with a legitimate
		// first have equal to plumbing.ZeroHash, which the original
		// comparison would silently drop.
		if i > 0 && bytes.Equal(last[:], have[:]) {
			continue
		}

		if err := e.Encodef("have %s\n", have); err != nil {
			return fmt.Errorf("sending haves for %q: %s", have, err)
		}

		last = have
	}

	if flush && len(u.Haves) != 0 {
		if err := e.Flush(); err != nil {
			return fmt.Errorf("sending flush-pkt after haves: %s", err)
		}
	}

	return nil
}

View File

@ -0,0 +1,109 @@
package packp
import (
"errors"
"io"
"bufio"
"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
"gopkg.in/src-d/go-git.v4/utils/ioutil"
)
// ErrUploadPackResponseNotDecoded is returned by Read if it is called
// before Decode has run (or before a packfile reader has been set).
var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded")
// UploadPackResponse contains all the information responded by the upload-pack
// service, the response implements io.ReadCloser that allows to read the
// packfile directly from it.
type UploadPackResponse struct {
	ShallowUpdate
	ServerResponse

	// r is the packfile reader, set by Decode or by
	// NewUploadPackResponseWithPackfile; nil until then.
	r io.ReadCloser
	// isShallow records whether the request had a depth, in which case a
	// shallow-update section precedes the server response on decode.
	isShallow bool
	// isMultiACK records whether multi_ack or multi_ack_detailed was
	// requested; it changes how ServerResponse is decoded.
	isMultiACK bool
	// isOk is not read or written in this file; presumably used
	// elsewhere — TODO confirm (or remove if dead).
	isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
// being responded by the response is required (its depth and capabilities
// determine how the response will be decoded).
func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse {
	return &UploadPackResponse{
		isShallow: !req.Depth.IsZero(),
		isMultiACK: req.Capabilities.Supports(capability.MultiACK) ||
			req.Capabilities.Supports(capability.MultiACKDetailed),
	}
}
// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance
// and sets its packfile reader.
func NewUploadPackResponseWithPackfile(req *UploadPackRequest,
	pf io.ReadCloser) *UploadPackResponse {

	resp := NewUploadPackResponse(req)
	resp.r = pf

	return resp
}
// Decode decodes all the responses sent by upload-pack service into the struct
// and prepares it to read the packfile using the Read method
func (r *UploadPackResponse) Decode(reader io.ReadCloser) error {
	buf := bufio.NewReader(reader)

	// A shallow request is answered with a shallow-update section before
	// the server response, so it must be consumed first.
	if r.isShallow {
		if err := r.ShallowUpdate.Decode(buf); err != nil {
			return err
		}
	}

	if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil {
		return err
	}

	// now the reader is ready to read the packfile content; wrap the
	// buffered reader with the original one so Close reaches it.
	r.r = ioutil.NewReadCloser(buf, reader)

	return nil
}
// Encode encodes an UploadPackResponse: the optional shallow-update
// section, the server response, and finally the packfile contents. The
// packfile reader is closed after copying (via the deferred CheckClose,
// which also surfaces the close error through the named return).
func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
	if r.isShallow {
		if err := r.ShallowUpdate.Encode(w); err != nil {
			return err
		}
	}

	if err := r.ServerResponse.Encode(w); err != nil {
		return err
	}

	// Without a packfile reader (Decode/NewUploadPackResponseWithPackfile
	// never ran) there is nothing more to write; copying from a nil
	// reader would panic.
	if r.r == nil {
		return nil
	}

	defer ioutil.CheckClose(r.r, &err)
	_, err = io.Copy(w, r.r)

	return err
}
// Read reads the packfile data, if the request was done with any Sideband
// capability the content read should be demultiplexed. If Decode was not
// called beforehand, ErrUploadPackResponseNotDecoded is returned.
func (r *UploadPackResponse) Read(p []byte) (int, error) {
	if r.r == nil {
		return 0, ErrUploadPackResponseNotDecoded
	}

	return r.r.Read(p)
}
// Close closes the underlying reader, if any; a no-op otherwise.
func (r *UploadPackResponse) Close() error {
	if r.r != nil {
		return r.r.Close()
	}

	return nil
}