forked from git-lfs/git-lfs
/
object_db.go
305 lines (258 loc) · 7.85 KB
/
object_db.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
package odb
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"sync/atomic"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git/odb/pack"
)
// ObjectDatabase enables the reading and writing of objects against a storage
// backend.
type ObjectDatabase struct {
	// members managed via sync/atomic must be aligned at the top of this
	// structure (see: https://github.com/git-lfs/git-lfs/pull/2880).
	// closed is a uint32 managed by sync/atomic's <X>Uint32 methods. It
	// yields a value of 0 if the *ObjectDatabase it is stored upon is open,
	// and a value of 1 if it is closed. Checked by open() and flipped
	// exactly once by Close().
	closed uint32
	// s is the storage backend which opens/creates/reads/writes loose
	// objects.
	s storer
	// packs are the set of packfiles which contain all packed objects
	// within this repository; used as a fallback when a loose object is
	// not found.
	packs *pack.Set
	// tmp is the directory used for temporary spool files while encoding
	// objects; defaults to os.TempDir when empty (via ioutil.TempFile).
	tmp string
}
// FromFilesystem constructs an *ObjectDatabase instance that is backed by a
// directory on the filesystem. Specifically, this should point to:
//
//	/absolute/repo/path/.git/objects
//
// It returns an error if the packfile set rooted at that directory could not
// be initialized.
func FromFilesystem(root, tmp string) (*ObjectDatabase, error) {
	set, err := pack.NewSet(root)
	if err != nil {
		return nil, err
	}
	db := &ObjectDatabase{
		s:     newFileStorer(root, tmp),
		packs: set,
		tmp:   tmp,
	}
	return db, nil
}
// Close closes the *ObjectDatabase, freeing any open resources (namely the
// set of packfiles held by it), and returning any error encountered in
// closing them.
//
// If Close() has already been called, this function will return an error.
func (o *ObjectDatabase) Close() error {
	if !atomic.CompareAndSwapUint32(&o.closed, 0, 1) {
		return errors.New("git/odb: *ObjectDatabase already closed")
	}
	return o.packs.Close()
}
// Blob returns a *Blob as identified by the SHA given, or an error if one was
// encountered.
func (o *ObjectDatabase) Blob(sha []byte) (*Blob, error) {
	blob := new(Blob)
	err := o.decode(sha, blob)
	if err != nil {
		return nil, err
	}
	return blob, nil
}
// Tree returns a *Tree as identified by the SHA given, or an error if one was
// encountered.
func (o *ObjectDatabase) Tree(sha []byte) (*Tree, error) {
	tree := new(Tree)
	err := o.decode(sha, tree)
	if err != nil {
		return nil, err
	}
	return tree, nil
}
// Commit returns a *Commit as identified by the SHA given, or an error if one
// was encountered.
func (o *ObjectDatabase) Commit(sha []byte) (*Commit, error) {
	commit := new(Commit)
	err := o.decode(sha, commit)
	if err != nil {
		return nil, err
	}
	return commit, nil
}
// Tag returns a *Tag as identified by the SHA given, or an error if one was
// encountered.
func (o *ObjectDatabase) Tag(sha []byte) (*Tag, error) {
	tag := new(Tag)
	err := o.decode(sha, tag)
	if err != nil {
		return nil, err
	}
	return tag, nil
}
// WriteBlob stores a *Blob on disk and returns the SHA it is uniquely
// identified by, or an error if one was encountered.
//
// The blob's contents are spooled through a temporary file (closed and
// removed before returning) so that arbitrarily large blobs need not be
// buffered entirely in memory.
func (o *ObjectDatabase) WriteBlob(b *Blob) ([]byte, error) {
	buf, err := ioutil.TempFile(o.tmp, "")
	if err != nil {
		return nil, err
	}
	// Deferred in LIFO order: the handle is closed first, then the file is
	// removed. The original code never closed the handle, leaking a file
	// descriptor and making the removal fail on platforms (e.g., Windows)
	// that forbid deleting open files.
	defer os.Remove(buf.Name())
	defer buf.Close()

	sha, _, err := o.encodeBuffer(b, buf)
	if err != nil {
		return nil, err
	}

	if err = b.Close(); err != nil {
		return nil, err
	}

	return sha, nil
}
// WriteTree serializes the given *Tree to the storage backend and reports the
// SHA under which it is now addressable, or an error if one was encountered.
func (o *ObjectDatabase) WriteTree(t *Tree) ([]byte, error) {
	sha, _, err := o.encode(t)
	if err != nil {
		return nil, err
	}
	return sha, nil
}
// WriteCommit serializes the given *Commit to the storage backend and reports
// the SHA under which it is now addressable, or an error if one was
// encountered.
func (o *ObjectDatabase) WriteCommit(c *Commit) ([]byte, error) {
	sha, _, err := o.encode(c)
	if err != nil {
		return nil, err
	}
	return sha, nil
}
// WriteTag serializes the given *Tag to the storage backend and reports the
// SHA under which it is now addressable, or an error if one was encountered.
func (o *ObjectDatabase) WriteTag(t *Tag) ([]byte, error) {
	sha, _, err := o.encode(t)
	if err != nil {
		return nil, err
	}
	return sha, nil
}
// Root returns the filesystem root that this *ObjectDatabase works within, if
// backed by a fileStorer (constructed by FromFilesystem). If so, it returns
// the fully-qualified path on a disk and a value of true.
//
// Otherwise, it returns empty-string and a value of false.
func (o *ObjectDatabase) Root() (string, bool) {
	// Detect filesystem backing structurally, via an anonymous interface,
	// rather than depending on the concrete storer type.
	type rooter interface {
		Root() string
	}
	r, ok := o.s.(rooter)
	if !ok {
		return "", false
	}
	return r.Root(), true
}
// encode encodes and saves an object to the storage backend and uses an
// in-memory buffer to calculate the object's encoded body.
func (d *ObjectDatabase) encode(object Object) (sha []byte, n int64, err error) {
	var scratch bytes.Buffer
	return d.encodeBuffer(object, &scratch)
}
// encodeBuffer encodes and saves an object to the storage backend by using the
// given buffer to calculate and store the object's encoded body.
//
// The object is first serialized into "buf" to learn its encoded length, then
// copied — with the loose-object header prepended — into a temporary file,
// which is handed to the storer. The temporary file is closed and removed
// before returning.
func (d *ObjectDatabase) encodeBuffer(object Object, buf io.ReadWriter) (sha []byte, n int64, err error) {
	// Serialize the body first so its length is known when the header is
	// written below.
	cn, err := object.Encode(buf)
	if err != nil {
		return nil, 0, err
	}

	tmp, err := ioutil.TempFile(d.tmp, "")
	if err != nil {
		return nil, 0, err
	}
	// Deferred in LIFO order: close the handle, then remove the file. The
	// original code never closed the handle, leaking a descriptor per call
	// and defeating the removal on platforms that forbid deleting open
	// files (e.g., Windows).
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	to := NewObjectWriter(tmp)
	if _, err = to.WriteHeader(object.Type(), int64(cn)); err != nil {
		return nil, 0, err
	}

	// If buf is seekable (e.g., a temporary file from WriteBlob), rewind
	// it so the body copy below starts from offset zero.
	if seek, ok := buf.(io.Seeker); ok {
		if _, err = seek.Seek(0, io.SeekStart); err != nil {
			return nil, 0, err
		}
	}

	if _, err = io.Copy(to, buf); err != nil {
		return nil, 0, err
	}
	if err = to.Close(); err != nil {
		return nil, 0, err
	}

	// Rewind the spool file so save() reads the complete encoded object
	// from the beginning.
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return nil, 0, err
	}
	return d.save(to.Sha(), tmp)
}
// save writes the contents of the given reader to the location given by the
// storer "o.s" as identified by the sha []byte, echoing the sha back along
// with the number of bytes written and any error from the store.
func (o *ObjectDatabase) save(sha []byte, buf io.Reader) ([]byte, int64, error) {
	written, err := o.s.Store(sha, buf)
	return sha, written, err
}
// open gives an `*ObjectReader` for the given loose object keyed by the given
// "sha" []byte, or an error.
//
// If the object does not exist as a loose object, open falls back to the
// repository's packfiles (via o.packs) and reconstructs a loose-object-style
// stream from the packed representation.
func (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) {
	f, err := o.s.Open(sha)
	if err != nil {
		if !os.IsNotExist(err) {
			// If there was some other issue beyond not being able
			// to find the object, return that immediately and don't
			// try and fall back to the packed object storage.
			return nil, err
		}
		// Otherwise, if the loose object simply couldn't be found,
		// attempt to load it from the packfile set instead.
		if atomic.LoadUint32(&o.closed) == 1 {
			return nil, errors.New("git/odb: cannot use closed *pack.Set")
		}
		packed, err := o.packs.Object(sha)
		if err != nil {
			return nil, err
		}
		// Inflate the packed representation into its raw contents.
		unpacked, err := packed.Unpack()
		if err != nil {
			return nil, err
		}
		// Reassemble the canonical loose-object layout — "<type>
		// <size>\x00" followed by the body — so callers see the same
		// stream shape as a loose object read from disk.
		return NewUncompressedObjectReader(io.MultiReader(
			// Git object header:
			strings.NewReader(fmt.Sprintf("%s %d\x00",
				packed.Type(), len(unpacked),
			)),
			// Git object (uncompressed) contents:
			bytes.NewReader(unpacked),
		))
	}
	return NewObjectReadCloser(f)
}
// decode decodes an object given by the sha "sha []byte" into the given object
// "into", or returns an error if one was encountered.
//
// Ordinarily, it closes the object's underlying io.ReadCloser (if it implements
// the `io.Closer` interface), but skips this if the "into" Object is of type
// BlobObjectType. Blob's don't exhaust the buffer completely (they instead
// maintain a handle on the blob's contents via an io.LimitedReader) and
// therefore cannot be closed until signaled explicitly by git/odb.Blob.Close().
//
// NOTE(review): the early returns on a type mismatch or a Decode error leave
// "r" unclosed — looks like a reader/descriptor leak on those paths; confirm
// whether ObjectReader requires an explicit Close there.
func (o *ObjectDatabase) decode(sha []byte, into Object) error {
	r, err := o.open(sha)
	if err != nil {
		return err
	}
	// Validate the on-disk type against the caller's expectation before
	// decoding the body.
	typ, size, err := r.Header()
	if err != nil {
		return err
	} else if typ != into.Type() {
		return &UnexpectedObjectType{Got: typ, Wanted: into.Type()}
	}
	if _, err = into.Decode(r, size); err != nil {
		return err
	}
	// Blobs keep the reader open (see doc comment above); everything else
	// is closed here.
	if into.Type() == BlobObjectType {
		return nil
	}
	return r.Close()
}