/
versions_leaf.go
126 lines (108 loc) · 3.1 KB
/
versions_leaf.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
package s3file
import (
"context"
"os"
"sync/atomic"
"unsafe"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/grailbio/base/errors"
"github.com/grailbio/base/file"
"github.com/grailbio/base/file/fsnode"
"github.com/grailbio/base/grail/biofs/biofseventlog"
"github.com/grailbio/base/ioctx"
"github.com/grailbio/base/ioctx/fsctx"
)
type (
	// versionsLeaf is an fsnode.Leaf representing one specific version of an
	// S3 object (identified by bucket/key plus versionID).
	versionsLeaf struct {
		fsnode.FileInfo
		s3Query
		// versionID selects which S3 object version this leaf exposes.
		versionID string
	}
	// versionsFile is the open-file handle produced by versionsLeaf.OpenFile.
	// It supports both cursor-based Read and positional ReadAt.
	versionsFile struct {
		versionsLeaf
		// readOffset is the cursor for Read().
		// NOTE(review): not protected by any lock — presumably callers do not
		// call Read concurrently on one handle; confirm with fsctx.File contract.
		readOffset int64
		// reader caches an idle *chunkReaderAt so sequential operations on a
		// single goroutine reuse one reader instead of recreating it.
		reader chunkReaderCache
	}
)
// Compile-time interface satisfaction checks.
var (
	_ fsnode.Leaf    = versionsLeaf{}
	_ fsctx.File     = (*versionsFile)(nil)
	_ ioctx.ReaderAt = (*versionsFile)(nil)
)
func (n versionsLeaf) FSNodeT() {}
// OpenFile implements fsnode.Leaf. It records a feature-usage event and
// returns a fresh handle for this object version; flag is ignored.
func (n versionsLeaf) OpenFile(ctx context.Context, flag int) (fsctx.File, error) {
	biofseventlog.UsedFeature("s3.versions.open")
	handle := versionsFile{versionsLeaf: n}
	return &handle, nil
}
// Stat implements fsctx.File, returning the file metadata captured at open
// time; it never touches S3 and never fails.
func (f *versionsFile) Stat(_ context.Context) (os.FileInfo, error) {
	info := f.FileInfo
	return info, nil
}
// Read implements fsctx.File. It delegates to ReadAt at the current cursor
// position, then advances the cursor by however many bytes were read (even
// when err is non-nil, matching io.Reader convention).
func (f *versionsFile) Read(ctx context.Context, dst []byte) (int, error) {
	nRead, readErr := f.ReadAt(ctx, dst, f.readOffset)
	f.readOffset += int64(nRead)
	return nRead, readErr
}
// ReadAt implements ioctx.ReaderAt. It acquires a cached (or freshly
// constructed) chunkReaderAt bound to this object version and reads
// len(dst) bytes starting at offset.
func (f *versionsFile) ReadAt(ctx context.Context, dst []byte, offset int64) (int, error) {
	// The create func runs only when no idle reader is cached; it resolves
	// the S3 clients authorized for GetObjectVersion on this bucket/key.
	reader, cleanUp, err := f.reader.getOrCreate(ctx, func() (*chunkReaderAt, error) {
		clients, err := f.impl.clientsForAction(ctx, "GetObjectVersion", f.bucket, f.key)
		if err != nil {
			return nil, errors.E(err, "getting clients")
		}
		return &chunkReaderAt{
			name: f.path(), bucket: f.bucket, key: f.key, versionID: f.versionID,
			// Each retry policy gets its own copy of the client slice so the
			// policy can reorder/consume it without affecting siblings.
			newRetryPolicy: func() retryPolicy {
				return newBackoffPolicy(append([]s3iface.S3API{}, clients...), file.Opts{})
			},
		}, nil
	})
	if err != nil {
		return 0, err
	}
	// cleanUp returns the reader to the cache (or closes it if the slot is
	// already occupied). Must run even if the read below fails.
	defer cleanUp()
	// TODO: Consider checking s3Info for ETag changes.
	n, _, err := reader.ReadAt(ctx, dst, offset)
	return n, err
}
// Close implements fsctx.File. It releases any cached S3 reader; it always
// succeeds.
func (f *versionsFile) Close(_ context.Context) error {
	f.reader.close()
	return nil
}
// chunkReaderCache holds at most one idle *chunkReaderAt for reuse across
// operations, coordinated with atomic pointer swaps (no mutex).
type chunkReaderCache struct {
	// available is idle (for some goroutine to use). Goroutines set available = nil before
	// using it to "acquire" it, then return it after their operation (if available == nil then).
	// If the caller only uses one thread, we'll end up creating and reusing just one
	// *chunkReaderAt for all operations.
	available unsafe.Pointer // *chunkReaderAt
}
// getOrCreate returns the cached idle reader if one is available, otherwise
// constructs a new one via create. cleanUp must be called iff error is nil;
// it attempts to return the reader to the cache and closes it if the cache
// slot is already occupied.
func (c *chunkReaderCache) getOrCreate(
	ctx context.Context, create func() (*chunkReaderAt, error),
) (
	reader *chunkReaderAt, cleanUp func(), err error,
) {
	trySaveReader := func() {
		// CAS succeeds only if the slot is still empty; otherwise another
		// goroutine parked its reader first, so discard ours.
		if atomic.CompareAndSwapPointer(&c.available, nil, unsafe.Pointer(reader)) {
			return
		}
		reader.Close()
	}
	// Atomically take ownership of the cached reader (leaving nil behind).
	reader = (*chunkReaderAt)(atomic.SwapPointer(&c.available, nil))
	if reader != nil {
		return reader, trySaveReader, nil
	}
	reader, err = create()
	if err != nil {
		// Defensive: close a partially-constructed reader if create returned
		// both a reader and an error.
		if reader != nil {
			reader.Close()
		}
		return nil, nil, err
	}
	return reader, trySaveReader, nil
}
// close releases the cached reader, if any. Safe to call when the cache is
// empty; the slot is emptied atomically so at most one caller closes it.
func (c *chunkReaderCache) close() {
	if idle := (*chunkReaderAt)(atomic.SwapPointer(&c.available, nil)); idle != nil {
		idle.Close()
	}
}