Merge pull request #3569 from mstrap/issue-3252

Issue 3252: git-lfs locks should optionally denote own locks

bk2204 committed Apr 16, 2019
2 parents 47cf02f + ce1c1c0 commit 2bbe78f

Showing 6 changed files with 233 additions and 95 deletions.
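At a high level, the change fetches two lock sets from the server ("ours" and "theirs") and prefixes the locks we own with an 'O' marker when listing. The following minimal, self-contained Go sketch illustrates that idea; the `Lock` struct and the hard-coded data are simplified stand-ins for the real `locking` package types, not the actual git-lfs API.

```go
package main

import (
	"fmt"
	"strings"
)

// Lock is a simplified stand-in for locking.Lock in git-lfs.
type Lock struct {
	Id    string
	Path  string
	Owner string
}

func main() {
	// Pretend these came back from a verifiable lock search on the server.
	ourLocks := []Lock{{Id: "1", Path: "assets/logo.psd", Owner: "me"}}
	theirLocks := []Lock{{Id: "2", Path: "docs/spec.docx", Owner: "alice"}}

	// Track ownership so the listing loop can mark our own locks.
	owned := make(map[Lock]bool)
	for _, l := range ourLocks {
		owned[l] = true
	}

	locks := append(ourLocks, theirLocks...)
	maxPath := 0
	for _, l := range locks {
		if len(l.Path) > maxPath {
			maxPath = len(l.Path)
		}
	}

	for _, l := range locks {
		kind := "  "
		if owned[l] {
			kind = "O " // 'O' denotes a lock held by us
		}
		padding := strings.Repeat(" ", maxPath-len(l.Path))
		fmt.Printf("%s%s%s\t%s\tID:%s\n", kind, l.Path, padding, l.Owner, l.Id)
	}
}
```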
51 changes: 47 additions & 4 deletions commands/command_locks.go
@@ -1,7 +1,7 @@
package commands

import (
"encoding/json"
"io"
"os"
"sort"
"strings"
@@ -44,11 +44,41 @@ func locksCommand(cmd *cobra.Command, args []string) {
}
}

locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local, locksCmdFlags.Cached)
if locksCmdFlags.Verify {
if len(filters) > 0 {
Exit("--verify option can't be combined with filters")
}
if locksCmdFlags.Local {
Exit("--verify option can't be combined with --local")
}
}

var locks []locking.Lock
var locksOwned map[locking.Lock]bool
var jsonWriteFunc func(io.Writer) error
if locksCmdFlags.Verify {
var ourLocks, theirLocks []locking.Lock
ourLocks, theirLocks, err = lockClient.SearchLocksVerifiable(locksCmdFlags.Limit, locksCmdFlags.Cached)
jsonWriteFunc = func(writer io.Writer) error {
return lockClient.EncodeLocksVerifiable(ourLocks, theirLocks, writer)
}

locks = append(ourLocks, theirLocks...)
locksOwned = make(map[locking.Lock]bool)
for _, lock := range ourLocks {
locksOwned[lock] = true
}
} else {
locks, err = lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local, locksCmdFlags.Cached)
jsonWriteFunc = func(writer io.Writer) error {
return lockClient.EncodeLocks(locks, writer)
}
}

// Print any we got before exiting

if locksCmdFlags.JSON {
if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil {
if err := jsonWriteFunc(os.Stdout); err != nil {
Error(err.Error())
}
return
@@ -77,7 +107,16 @@ func locksCommand(cmd *cobra.Command, args []string) {

pathPadding := tools.MaxInt(maxPathLen-len(lock.Path), 0)
namePadding := tools.MaxInt(maxNameLen-len(ownerName), 0)
Print("%s%s\t%s%s\tID:%s", lock.Path, strings.Repeat(" ", pathPadding),
kind := ""
if locksOwned != nil {
if locksOwned[lock] {
kind = "O "
} else {
kind = " "
}
}

Print("%s%s%s\t%s%s\tID:%s", kind, lock.Path, strings.Repeat(" ", pathPadding),
ownerName, strings.Repeat(" ", namePadding),
lock.Id,
)
@@ -108,6 +147,9 @@ type locksFlags struct {
// for non-local queries, report cached query results from the last query
// instead of actually querying the server again
Cached bool
// for non-local queries, verify lock owner on server and
// denote our locks in output
Verify bool
}

// Filters produces a filter based on locksFlags instance.
@@ -137,6 +179,7 @@ func init() {
cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return")
cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks")
cmd.Flags().BoolVarP(&locksCmdFlags.Cached, "cached", "", false, "list cached lock information from the last remote query, instead of actually querying the server")
cmd.Flags().BoolVarP(&locksCmdFlags.Verify, "verify", "", false, "verify lock owner on server and mark own locks with 'O'")
cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json")
})
}
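The `jsonWriteFunc` indirection above exists because `--json` output differs between the two modes: a plain array of locks for a normal search, and an object with separate ours/theirs lists when `--verify` is used. Below is a hedged sketch of the two shapes, using simplified local types rather than the real `lockVerifiableList`; field names here are illustrative.

```go
package main

import (
	"encoding/json"
	"os"
)

// Simplified stand-ins for the git-lfs lock types.
type Lock struct {
	Id   string `json:"id"`
	Path string `json:"path"`
}

type verifiableList struct {
	Ours   []Lock `json:"ours"`
	Theirs []Lock `json:"theirs"`
}

func main() {
	locks := []Lock{{Id: "1", Path: "a.bin"}}

	// Plain search: encode the flat list, e.g. [{"id":"1","path":"a.bin"}]
	if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil {
		panic(err)
	}

	// --verify search: encode ours/theirs separately,
	// e.g. {"ours":[...],"theirs":[]}
	if err := json.NewEncoder(os.Stdout).Encode(&verifiableList{Ours: locks, Theirs: []Lock{}}); err != nil {
		panic(err)
	}
}
```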
7 changes: 6 additions & 1 deletion commands/lockverifier.go
@@ -46,12 +46,17 @@ type lockVerifier struct {
}

func (lv *lockVerifier) Verify(ref *git.Ref) {
if ref == nil {
panic("no ref specified for verification")
}

if lv.verifyState == verifyStateDisabled || lv.verifiedRefs[ref.Refspec()] {
return
}

lockClient := newLockClient()
ours, theirs, err := lockClient.VerifiableLocks(ref, 0)
lockClient.RemoteRef = ref
ours, theirs, err := lockClient.SearchLocksVerifiable(0, false)
if err != nil {
if errors.IsNotImplementedError(err) {
disableFor(lv.endpoint.Url)
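The verifier change above replaces the explicit `ref` argument with client state: the caller sets `RemoteRef` on the lock client and then calls `SearchLocksVerifiable`. A rough sketch of that calling convention follows, with hypothetical `Ref` and `Client` types standing in for `git.Ref` and the real lock client.

```go
package main

import "fmt"

// Ref and Client are hypothetical stand-ins for git.Ref and locking.Client.
type Ref struct{ Name string }

func (r *Ref) Refspec() string { return r.Name }

type Client struct {
	RemoteRef *Ref
}

// SearchLocksVerifiable mimics the new signature: the ref comes from
// client state rather than a parameter.
func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ours, theirs []string, err error) {
	if c.RemoteRef != nil {
		fmt.Println("searching verifiable locks for", c.RemoteRef.Refspec())
	}
	return nil, nil, nil
}

func main() {
	lockClient := &Client{}

	// Old style (removed): lockClient.VerifiableLocks(ref, 0)
	// New style: configure the ref on the client, then search.
	lockClient.RemoteRef = &Ref{Name: "refs/heads/main"}
	if _, _, err := lockClient.SearchLocksVerifiable(0, false); err != nil {
		fmt.Println("verification failed:", err)
	}
}
```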
10 changes: 10 additions & 0 deletions docs/man/git-lfs-locks.1.ronn
@@ -29,6 +29,16 @@ Lists current locks from the Git LFS server.
last known locks in case you are offline. There is no guarantee that locks
on the server have not changed in the meanwhile.

* `--verify`:
Verifies the lock owner on the server and marks our own locks with 'O'.
Own locks are actually held by us, and the corresponding files can be updated
for the next push. All other locks are held by someone else.
Unlike --local, this option will also detect locks which are held by us
even when no local lock information is available (e.g. because the file had
been locked from a different clone); it will also detect 'broken' locks
(e.g. if someone else has forcefully unlocked our files).

* `-l <num>` `--limit=<num>`:
Specifies number of results to return.

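To illustrate the "broken lock" case described for `--verify`: a path this clone recorded as locked locally, but which no longer appears in the server's "ours" list, has effectively been unlocked out from under us. The sketch below is purely illustrative of that comparison and is not code from git-lfs.

```go
package main

import "fmt"

func main() {
	// Paths this clone believes it has locked (local records).
	local := []string{"assets/logo.psd", "docs/spec.docx"}

	// Paths the server still reports as locked by us (--verify result).
	serverOurs := map[string]bool{"assets/logo.psd": true}

	for _, path := range local {
		if !serverOurs[path] {
			// Someone force-unlocked this path, or the local record is stale.
			fmt.Printf("broken lock: %s is no longer held by us on the server\n", path)
		}
	}
}
```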
2 changes: 1 addition & 1 deletion locking/api_test.go
@@ -171,7 +171,7 @@ func TestAPISearch(t *testing.T) {
assert.Equal(t, "2", locks.Locks[1].Id)
}

func TestAPIVerifiableLocks(t *testing.T) {
func TestAPISearchVerifiable(t *testing.T) {
require.NotNil(t, verifyResSchema)

srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
184 changes: 105 additions & 79 deletions locking/locks.go
@@ -3,6 +3,7 @@ package locking
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
@@ -218,100 +219,107 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool
return []Lock{}, errors.New("can't search cached locks when filter or limit is set")
}

cacheFile, err := c.prepareCacheDirectory()
if err != nil {
return []Lock{}, err
}

_, err = os.Stat(cacheFile)
if err != nil {
if os.IsNotExist(err) {
return []Lock{}, errors.New("no cached locks present")
}

return []Lock{}, err
}

return c.readLocksFromCacheFile(cacheFile)
locks := []Lock{}
err := c.readLocksFromCacheFile("remote", func(decoder *json.Decoder) error {
return decoder.Decode(&locks)
})
return locks, err
} else {
locks, err := c.searchRemoteLocks(filter, limit)
if err != nil {
return locks, err
}

if len(filter) == 0 && limit == 0 {
cacheFile, err := c.prepareCacheDirectory()
if err != nil {
return locks, err
}

err = c.writeLocksToCacheFile(cacheFile, locks)
err = c.writeLocksToCacheFile("remote", func(writer io.Writer) error {
return c.EncodeLocks(locks, writer)
})
}

return locks, err
}
}

func (c *Client) VerifiableLocks(ref *git.Ref, limit int) (ourLocks, theirLocks []Lock, err error) {
if ref == nil {
ref = c.RemoteRef
}

func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirLocks []Lock, err error) {
ourLocks = make([]Lock, 0, limit)
theirLocks = make([]Lock, 0, limit)
body := &lockVerifiableRequest{
Ref: &lockRef{Name: ref.Refspec()},
Limit: limit,
}

c.cache.Clear()
if cached {
if limit != 0 {
return []Lock{}, []Lock{}, errors.New("can't search cached locks when limit is set")
}

for {
list, res, err := c.client.SearchVerifiable(c.Remote, body)
if res != nil {
switch res.StatusCode {
case http.StatusNotFound, http.StatusNotImplemented:
return ourLocks, theirLocks, errors.NewNotImplementedError(err)
case http.StatusForbidden:
return ourLocks, theirLocks, errors.NewAuthError(err)
}
locks := &lockVerifiableList{}
err := c.readLocksFromCacheFile("verifiable", func(decoder *json.Decoder) error {
return decoder.Decode(&locks)
})
return locks.Ours, locks.Theirs, err
} else {
var requestRef *lockRef
if c.RemoteRef != nil {
requestRef = &lockRef{Name: c.RemoteRef.Refspec()}
}

if err != nil {
return ourLocks, theirLocks, err
body := &lockVerifiableRequest{
Ref: requestRef,
Limit: limit,
}

if list.Message != "" {
if len(list.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", list.RequestID)
c.cache.Clear()

for {
list, res, err := c.client.SearchVerifiable(c.Remote, body)
if res != nil {
switch res.StatusCode {
case http.StatusNotFound, http.StatusNotImplemented:
return ourLocks, theirLocks, errors.NewNotImplementedError(err)
case http.StatusForbidden:
return ourLocks, theirLocks, errors.NewAuthError(err)
}
}
return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message)
}

for _, l := range list.Ours {
c.cache.Add(l)
ourLocks = append(ourLocks, l)
if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
return ourLocks, theirLocks, nil
if err != nil {
return ourLocks, theirLocks, err
}
}

for _, l := range list.Theirs {
c.cache.Add(l)
theirLocks = append(theirLocks, l)
if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
return ourLocks, theirLocks, nil
if list.Message != "" {
if len(list.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", list.RequestID)
}
return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message)
}

for _, l := range list.Ours {
c.cache.Add(l)
ourLocks = append(ourLocks, l)
if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
return ourLocks, theirLocks, nil
}
}

for _, l := range list.Theirs {
c.cache.Add(l)
theirLocks = append(theirLocks, l)
if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
return ourLocks, theirLocks, nil
}
}

if list.NextCursor != "" {
body.Cursor = list.NextCursor
} else {
break
}
}

if list.NextCursor != "" {
body.Cursor = list.NextCursor
} else {
break
if limit == 0 {
err = c.writeLocksToCacheFile("verifiable", func(writer io.Writer) error {
return c.EncodeLocksVerifiable(ourLocks, theirLocks, writer)
})
}
}

return ourLocks, theirLocks, nil
return ourLocks, theirLocks, err
}
}

func (c *Client) searchLocalLocks(filter map[string]string, limit int) ([]Lock, error) {
@@ -426,7 +434,7 @@ func init() {
kv.RegisterTypeForStorage(&Lock{})
}

func (c *Client) prepareCacheDirectory() (string, error) {
func (c *Client) prepareCacheDirectory(kind string) (string, error) {
cacheDir := filepath.Join(c.cacheDir, "locks")
if c.RemoteRef != nil {
cacheDir = filepath.Join(cacheDir, c.RemoteRef.Refspec())
@@ -446,39 +454,57 @@ func (c *Client) prepareCacheDirectory() (string, error) {
return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed")
}

return filepath.Join(cacheDir, "remote"), nil
return filepath.Join(cacheDir, kind), nil
}

func (c *Client) readLocksFromCacheFile(path string) ([]Lock, error) {
file, err := os.Open(path)
func (c *Client) readLocksFromCacheFile(kind string, decoder func(*json.Decoder) error) error {
cacheFile, err := c.prepareCacheDirectory(kind)
if err != nil {
return []Lock{}, err
return err
}

defer file.Close()
_, err = os.Stat(cacheFile)
if err != nil {
if os.IsNotExist(err) {
return errors.New("no cached locks present")
}

return err
}

locks := []Lock{}
err = json.NewDecoder(file).Decode(&locks)
file, err := os.Open(cacheFile)
if err != nil {
return []Lock{}, err
return err
}

return locks, nil
defer file.Close()
return decoder(json.NewDecoder(file))
}

func (c *Client) writeLocksToCacheFile(path string, locks []Lock) error {
file, err := os.Create(path)
func (c *Client) EncodeLocks(locks []Lock, writer io.Writer) error {
return json.NewEncoder(writer).Encode(locks)
}

func (c *Client) EncodeLocksVerifiable(ourLocks, theirLocks []Lock, writer io.Writer) error {
return json.NewEncoder(writer).Encode(&lockVerifiableList{
Ours: ourLocks,
Theirs: theirLocks,
})
}

func (c *Client) writeLocksToCacheFile(kind string, writer func(io.Writer) error) error {
cacheFile, err := c.prepareCacheDirectory(kind)
if err != nil {
return err
}

err = json.NewEncoder(file).Encode(locks)
file, err := os.Create(cacheFile)
if err != nil {
file.Close()
return err
}

return file.Close()
defer file.Close()
return writer(file)
}

type nilLockCacher struct{}
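The cache refactoring in locks.go keys the cache file on a kind string ("remote" vs. "verifiable") and hands a `*json.Decoder` or `io.Writer` callback to shared read/write helpers, so each caller controls the JSON shape. A simplified, self-contained sketch of that callback pattern follows; the directory layout, function names, and string payloads here are illustrative, not the real git-lfs implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// readCache opens the cache file for the given kind and lets the caller
// decide how to decode it (flat list vs. ours/theirs wrapper).
func readCache(dir, kind string, decode func(*json.Decoder) error) error {
	file, err := os.Open(filepath.Join(dir, kind))
	if err != nil {
		return err
	}
	defer file.Close()
	return decode(json.NewDecoder(file))
}

// writeCache creates the cache file for the given kind and lets the caller
// decide how to encode into it.
func writeCache(dir, kind string, write func(io.Writer) error) error {
	file, err := os.Create(filepath.Join(dir, kind))
	if err != nil {
		return err
	}
	defer file.Close()
	return write(file)
}

func main() {
	dir, err := os.MkdirTemp("", "locks-cache")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	locks := []string{"a.bin", "b.bin"}

	// "remote" cache: plain list of locks.
	if err := writeCache(dir, "remote", func(w io.Writer) error {
		return json.NewEncoder(w).Encode(locks)
	}); err != nil {
		panic(err)
	}

	var cached []string
	if err := readCache(dir, "remote", func(d *json.Decoder) error {
		return d.Decode(&cached)
	}); err != nil {
		panic(err)
	}
	fmt.Println("cached locks:", cached)
}
```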
