Showing with 2,091 additions and 215 deletions.
  1. +5 −0 config/config.go
  2. +37 −0 internal/url/url.go
  3. +5 −0 options.go
  4. +7 −1 plumbing/format/packfile/common.go
  5. +14 −0 plumbing/format/packfile/common_test.go
  6. +1 −1 plumbing/format/packfile/fsobject.go
  7. +29 −24 plumbing/format/packfile/packfile.go
  8. +26 −0 plumbing/format/packfile/packfile_test.go
  9. +1 −5 plumbing/format/packfile/parser.go
  10. +42 −4 plumbing/format/packfile/scanner.go
  11. +17 −0 plumbing/format/packfile/scanner_test.go
  12. +144 −0 plumbing/object/commit_walker.go
  13. +40 −10 plumbing/object/commit_walker_file.go
  14. +14 −2 plumbing/revlist/revlist.go
  15. +26 −0 plumbing/revlist/revlist_test.go
  16. +1 −1 plumbing/storer/object.go
  17. +66 −4 plumbing/storer/reference.go
  18. +23 −0 plumbing/storer/reference_test.go
  19. +8 −14 plumbing/transport/common.go
  20. +1 −5 plumbing/transport/test/receive_pack.go
  21. +14 −1 remote.go
  22. +76 −27 repository.go
  23. +227 −14 repository_test.go
  24. +2 −1 storage/filesystem/dotgit/dotgit.go
  25. +49 −2 storage/filesystem/dotgit/dotgit_setref.go
  26. +0 −47 storage/filesystem/dotgit/dotgit_setref_norwfs.go
  27. +24 −1 storage/filesystem/dotgit/dotgit_test.go
  28. +36 −24 storage/filesystem/object.go
  29. +17 −0 storage/filesystem/object_test.go
  30. +1 −5 storage/filesystem/storage.go
  31. +1 −2 storage/memory/storage.go
  32. +4 −0 storage/storer.go
  33. +51 −0 storage/test/storage_suite.go
  34. +50 −0 storage/transactional/config.go
  35. +82 −0 storage/transactional/config_test.go
  36. +7 −0 storage/transactional/doc.go
  37. +56 −0 storage/transactional/index.go
  38. +52 −0 storage/transactional/index_test.go
  39. +84 −0 storage/transactional/object.go
  40. +153 −0 storage/transactional/object_test.go
  41. +138 −0 storage/transactional/reference.go
  42. +157 −0 storage/transactional/reference_test.go
  43. +51 −0 storage/transactional/shallow.go
  44. +62 −0 storage/transactional/shallow_test.go
  45. +69 −0 storage/transactional/storage.go
  46. +52 −0 storage/transactional/storage_test.go
  47. +22 −1 utils/merkletrie/difftree_test.go
  48. +3 −7 utils/merkletrie/noder/path.go
  49. +6 −2 utils/merkletrie/noder/path_test.go
  50. +6 −5 worktree.go
  51. +32 −5 worktree_test.go
5 changes: 5 additions & 0 deletions config/config.go
@@ -8,6 +8,7 @@ import (
"sort"
"strconv"

"gopkg.in/src-d/go-git.v4/internal/url"
format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

@@ -399,3 +400,7 @@ func (c *RemoteConfig) marshal() *format.Subsection {

return c.raw
}

func (c *RemoteConfig) IsFirstURLLocal() bool {
return url.IsLocalEndpoint(c.URLs[0])
}
37 changes: 37 additions & 0 deletions internal/url/url.go
@@ -0,0 +1,37 @@
package url

import (
"regexp"
)

var (
isSchemeRegExp = regexp.MustCompile(`^[^:]+://`)
scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
)

// MatchesScheme returns true if the given string matches a URL-like
// format scheme.
func MatchesScheme(url string) bool {
return isSchemeRegExp.MatchString(url)
}

// MatchesScpLike returns true if the given string matches an SCP-like
// format scheme.
func MatchesScpLike(url string) bool {
return scpLikeUrlRegExp.MatchString(url)
}

// FindScpLikeComponents returns the user, host, port and path of the
// given SCP-like URL.
func FindScpLikeComponents(url string) (user, host, port, path string) {
m := scpLikeUrlRegExp.FindStringSubmatch(url)
return m[1], m[2], m[3], m[4]
}

// IsLocalEndpoint returns true if the given URL string specifies a
// local file endpoint. For example, on a Linux machine,
// `/home/user/src/go-git` would match as a local endpoint, but
// `https://github.com/src-d/go-git` would not.
func IsLocalEndpoint(url string) bool {
return !MatchesScheme(url) && !MatchesScpLike(url)
}
5 changes: 5 additions & 0 deletions options.go
@@ -335,6 +335,11 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
FileName *string

// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
// It is equivalent to running `git log --all`.
// If set to true, the From option will be ignored.
All bool
}

var (
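
Example (illustrative, not part of the diff): a rough sketch of the new option in use, assuming an existing repository in the current directory. It mirrors `git log --all`, so From is left unset.

package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	// All: true walks the commits reachable from every reference under
	// refs/ plus HEAD, so there is no need to set From.
	iter, err := repo.Log(&git.LogOptions{All: true})
	if err != nil {
		panic(err)
	}

	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Message)
		return nil
	})
}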
8 changes: 7 additions & 1 deletion plumbing/format/packfile/common.go
@@ -51,7 +51,13 @@ func WritePackfileToObjectStorage(
}

defer ioutil.CheckClose(w, &err)
_, err = io.Copy(w, packfile)

var n int64
n, err = io.Copy(w, packfile)
if err == nil && n == 0 {
return ErrEmptyPackfile
}

return err
}

14 changes: 14 additions & 0 deletions plumbing/format/packfile/common_test.go
@@ -1,15 +1,29 @@
package packfile

import (
"bytes"
"testing"

"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/storage/memory"

. "gopkg.in/check.v1"
)

func Test(t *testing.T) { TestingT(t) }

type CommonSuite struct{}

var _ = Suite(&CommonSuite{})

func (s *CommonSuite) TestEmptyUpdateObjectStorage(c *C) {
var buf bytes.Buffer
sto := memory.NewStorage()

err := UpdateObjectStorage(sto, &buf)
c.Assert(err, Equals, ErrEmptyPackfile)
}

func newObject(t plumbing.ObjectType, cont []byte) plumbing.EncodedObject {
o := plumbing.MemoryObject{}
o.SetType(t)
2 changes: 1 addition & 1 deletion plumbing/format/packfile/fsobject.go
@@ -48,7 +48,7 @@ func NewFSObject(
// Reader implements the plumbing.EncodedObject interface.
func (o *FSObject) Reader() (io.ReadCloser, error) {
obj, ok := o.cache.Get(o.hash)
if ok {
if ok && obj != o {
reader, err := obj.Reader()
if err != nil {
return nil, err
53 changes: 29 additions & 24 deletions plumbing/format/packfile/packfile.go
@@ -21,6 +21,16 @@ var (
ErrZLib = NewError("zlib reading error")
)

// When reading small objects from packfile it is beneficial to do so at
// once to exploit the buffered I/O. In many cases the objects are so small
// that they were already loaded to memory when the object header was
// loaded from the packfile. Wrapping in FSObject would cause this buffered
// data to be thrown away and then re-read later, with the additional
// seeking causing reloads from disk. Objects smaller than this threshold
// are now always read into memory and stored in cache instead of being
// wrapped in FSObject.
const smallObjectThreshold = 16 * 1024

// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
@@ -79,15 +89,7 @@ func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
}
}

if _, err := p.s.SeekFromStart(o); err != nil {
if err == io.EOF || isInvalid(err) {
return nil, plumbing.ErrObjectNotFound
}

return nil, err
}

return p.nextObject()
return p.objectAtOffset(o)
}

// GetSizeByOffset retrieves the size of the encoded object from the
@@ -105,7 +107,13 @@ func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) {
if err != nil {
return 0, err
}
return h.Length, nil
return p.getObjectSize(h)
}

func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
h, err := p.s.SeekObjectHeader(offset)
p.s.pendingObject = nil
return h, err
}

func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
@@ -154,11 +162,7 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
if baseType, ok := p.offsetToType[offset]; ok {
typ = baseType
} else {
if _, err = p.s.SeekFromStart(offset); err != nil {
return
}

h, err = p.nextObjectHeader()
h, err = p.objectHeaderAtOffset(offset)
if err != nil {
return
}
@@ -175,8 +179,8 @@ func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err
return
}

func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
h, err := p.nextObjectHeader()
func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
if err == io.EOF || isInvalid(err) {
return nil, plumbing.ErrObjectNotFound
@@ -190,6 +194,13 @@ func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
return p.getNextObject(h)
}

// If the object is not a delta and it's small enough then read it
// completely into memory now since it is already read from disk
// into buffer anyway.
if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
return p.getNextObject(h)
}

hash, err := p.FindHash(h.Offset)
if err != nil {
return nil, err
@@ -233,11 +244,7 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
}
}

if _, err := p.s.SeekFromStart(offset); err != nil {
return nil, err
}

h, err := p.nextObjectHeader()
h, err := p.objectHeaderAtOffset(offset)
if err != nil {
return nil, err
}
@@ -329,8 +336,6 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
if err != nil {
return err
}

p.cachePut(base)
}

obj.SetType(base.Type())
26 changes: 26 additions & 0 deletions plumbing/format/packfile/packfile_test.go
@@ -277,3 +277,29 @@ func getIndexFromIdxFile(r io.Reader) idxfile.Index {

return idxf
}

func (s *PackfileSuite) TestSize(c *C) {
f := fixtures.Basic().ByTag("ref-delta").One()

index := getIndexFromIdxFile(f.Idx())
fs := osfs.New("")
pf, err := fs.Open(f.Packfile().Name())
c.Assert(err, IsNil)

packfile := packfile.NewPackfile(index, fs, pf)
defer packfile.Close()

// Get the size of binary.jpg, which is not delta-encoded.
offset, err := packfile.FindOffset(plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"))
c.Assert(err, IsNil)
size, err := packfile.GetSizeByOffset(offset)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(76110))

// Get the size of the root commit, which is delta-encoded.
offset, err = packfile.FindOffset(f.Head)
c.Assert(err, IsNil)
size, err = packfile.GetSizeByOffset(offset)
c.Assert(err, IsNil)
c.Assert(size, Equals, int64(245))
}
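
Example (illustrative, not part of the diff): a hedged sketch of reading a single object through the Packfile API, modeled on the TestSize test above. The pack/idx paths are hypothetical, and the hash is the binary.jpg blob from the test fixture; with a different pack you would use one of its own hashes. Objects at or below the 16 KiB smallObjectThreshold come back fully buffered in memory, while larger non-delta objects are wrapped lazily as FSObjects.

package main

import (
	"fmt"

	"gopkg.in/src-d/go-billy.v4/osfs"
	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
)

func main() {
	fs := osfs.New("")

	// Decode the .idx that accompanies the packfile (hypothetical paths).
	idxf, err := fs.Open("/tmp/pack-abc.idx")
	if err != nil {
		panic(err)
	}
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxf).Decode(idx); err != nil {
		panic(err)
	}

	pf, err := fs.Open("/tmp/pack-abc.pack")
	if err != nil {
		panic(err)
	}

	p := packfile.NewPackfile(idx, fs, pf)
	defer p.Close()

	// Small objects are cached in memory; large ones are read lazily.
	obj, err := p.Get(plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"))
	if err != nil {
		panic(err)
	}
	fmt.Println(obj.Type(), obj.Size())
}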
6 changes: 1 addition & 5 deletions plumbing/format/packfile/parser.go
@@ -398,11 +398,7 @@ func (p *Parser) readData(o *objectInfo) ([]byte, error) {
return data, nil
}

if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
return nil, err
}

if _, err := p.scanner.NextObjectHeader(); err != nil {
if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
return nil, err
}

Expand Down
46 changes: 42 additions & 4 deletions plumbing/format/packfile/scanner.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,14 +138,52 @@ func (s *Scanner) readCount() (uint32, error) {
return binary.ReadUint32(s.r)
}

// SeekObjectHeader seeks to specified offset and returns the ObjectHeader
// for the next object in the reader
func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}

if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
return nil, err
}

h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}

h.Offset = offset
return h, nil
}

// NextObjectHeader returns the ObjectHeader for the next object in the reader
func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
defer s.Flush()

if err := s.doPending(); err != nil {
return nil, err
}

offset, err := s.r.Seek(0, io.SeekCurrent)
if err != nil {
return nil, err
}

h, err := s.nextObjectHeader()
if err != nil {
return nil, err
}

h.Offset = offset
return h, nil
}

// nextObjectHeader returns the ObjectHeader for the next object in the reader
// without the Offset field
func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
defer s.Flush()

s.crc.Reset()

h := &ObjectHeader{}
@@ -308,7 +346,7 @@ var byteSlicePool = sync.Pool{
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
// if seeking we assume that you are not interested on the header
// if seeking we assume that you are not interested in the header
if s.version == 0 {
s.version = VersionSupported
}
@@ -385,7 +423,7 @@ type bufferedSeeker struct {
}

func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekCurrent {
if whence == io.SeekCurrent && offset == 0 {
current, err := r.r.Seek(offset, whence)
if err != nil {
return current, err
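
Example (illustrative, not part of the diff): a generic sketch of why only a zero-offset io.SeekCurrent can keep buffered data intact: it is just a position query, while any real reposition makes the bufio lookahead stale and forces a reset. This shows the pattern behind the bufferedSeeker change above, not go-git's actual implementation; the package and type names are hypothetical.

package bufseek

import (
	"bufio"
	"io"
	"os"
)

type bufferedReadSeeker struct {
	f *os.File
	r *bufio.Reader
}

func (b *bufferedReadSeeker) Read(p []byte) (int, error) { return b.r.Read(p) }

func (b *bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekCurrent && offset == 0 {
		// Position query: the file is ahead of the logical position by
		// however many bytes are still sitting in the buffer.
		pos, err := b.f.Seek(0, io.SeekCurrent)
		if err != nil {
			return 0, err
		}
		return pos - int64(b.r.Buffered()), nil
	}

	// Real reposition: the buffered lookahead no longer matches the new
	// offset, so drop it and restart buffering from there.
	pos, err := b.f.Seek(offset, whence)
	if err != nil {
		return 0, err
	}
	b.r.Reset(b.f)
	return pos, nil
}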
17 changes: 17 additions & 0 deletions plumbing/format/packfile/scanner_test.go
@@ -118,6 +118,23 @@ func (s *ScannerSuite) TestNextObjectHeaderWithOutReadObjectNonSeekable(c *C) {
c.Assert(n, Equals, f.PackfileHash)
}

func (s *ScannerSuite) TestSeekObjectHeader(c *C) {
r := fixtures.Basic().One().Packfile()
p := NewScanner(r)

h, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
c.Assert(err, IsNil)
c.Assert(h, DeepEquals, &expectedHeadersOFS[4])
}

func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
r := io.MultiReader(fixtures.Basic().One().Packfile())
p := NewScanner(r)

_, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
c.Assert(err, Equals, ErrSeekNotSupported)
}

var expectedHeadersOFS = []ObjectHeader{
{Type: plumbing.CommitObject, Offset: 12, Length: 254},
{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
Expand Down
Loading