-
Notifications
You must be signed in to change notification settings - Fork 0
/
interface.go
239 lines (210 loc) · 7.51 KB
/
interface.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
package blobstore
import (
	"context"
	"errors"
	"io"
	"net/http"
	"time"

	"github.com/Bnei-Baruch/jukfs/pkg/blob"
)
var (
	// ErrCorruptBlob is returned when a blob's content does not hash
	// to the digest named by its blob ref.
	ErrCorruptBlob = errors.New("corrupt blob; digest doesn't match")

	// ErrNotImplemented should be returned in methods where the function is not implemented.
	ErrNotImplemented = errors.New("not implemented")
)
// BlobReceiver is the interface for receiving blobs.
type BlobReceiver interface {
	// ReceiveBlob accepts a newly uploaded blob and writes it to
	// permanent storage.
	//
	// Implementations of BlobReceiver downstream of the HTTP
	// server can trust that the source isn't larger than
	// MaxBlobSize and that its digest matches the provided blob
	// ref. (If not, the read of the source will fail before EOF)
	//
	// To ensure those guarantees, callers of ReceiveBlob should
	// not call ReceiveBlob directly but instead use either
	// blobserver.Receive or blobserver.ReceiveString, which also
	// take care of notifying the BlobReceiver's "BlobHub"
	// notification bus for observers.
	//
	// NOTE(review): this package is named blobstore; confirm where the
	// Receive/ReceiveString helpers mentioned above live in this fork.
	ReceiveBlob(ctx context.Context, br blob.Ref, source io.Reader) (blob.SizedRef, error)
}
// BlobStatter is the interface for checking the size and existence of blobs.
type BlobStatter interface {
	// StatBlobs checks for the existence of blobs, calling fn in
	// serial for each found blob, in any order, but with no
	// duplicates. The blobs slice should not have duplicates.
	//
	// If fn returns an error, StatBlobs returns with that value
	// and makes no further calls to fn.
	//
	// StatBlobs does not return an error on missing blobs, only
	// on failure to stat blobs.
	StatBlobs(ctx context.Context, blobs []blob.Ref, fn func(blob.SizedRef) error) error
}
// StatReceiver is the combination of receiving blobs and
// checking their size and existence.
type StatReceiver interface {
	BlobReceiver
	BlobStatter
}
// BlobEnumerator is the interface for enumerating, in sorted order,
// the blobs held by a storage implementation.
type BlobEnumerator interface {
	// EnumerateBlobs sends at most limit SizedBlobRef into dest,
	// sorted, as long as they are lexicographically greater than
	// after (if provided).
	// limit will be supplied and sanity checked by caller.
	// EnumerateBlobs must close the channel. (even if limit
	// was hit and more blobs remain, or an error is returned, or
	// the ctx is canceled)
	EnumerateBlobs(ctx context.Context,
		dest chan<- blob.SizedRef,
		after string,
		limit int) error

	// TODO: remove limit from this interface, since the caller
	// can cancel? see if that would simplify implementations and
	// callers.
}
// BlobAndToken is the value used by the BlobStreamer interface,
// containing both a Blob and a continuation token.
type BlobAndToken struct {
	*blob.Blob

	// Token is the continuation token to resume streaming
	// starting at this blob in the future.
	Token string
}
// BlobStreamer is an optional interface that may be implemented by
// Storage implementations.
type BlobStreamer interface {
	// StreamBlobs sends blobs to dest in an unspecified order. It is
	// expected that a Storage implementation implementing
	// BlobStreamer will send blobs to dest in the most efficient
	// order possible.
	//
	// The provided continuation token resumes the stream at a
	// point. To start from the beginning, send the empty string.
	// The token is opaque and must never be interpreted; its
	// format may change between versions of the server.
	//
	// If the context is canceled, the error value is
	// context.Canceled.
	//
	// StreamBlobs must unconditionally close dest before
	// returning, and it must return context.Canceled if
	// ctx.Done() becomes readable.
	//
	// When StreamBlobs reaches the end, the return value is nil.
	StreamBlobs(ctx context.Context, dest chan<- BlobAndToken, contToken string) error
}
// Cache is the minimal interface expected of a blob cache.
type Cache interface {
	blob.Fetcher
	BlobReceiver
	BlobStatter
}
// BlobReceiveConfiger is a BlobReceiver that can also report
// its configuration.
type BlobReceiveConfiger interface {
	BlobReceiver
	Configer
}
// Config describes the capabilities and settings of a blob server
// handler, as reported by a Configer.
type Config struct {
	Writable    bool
	Readable    bool
	Deletable   bool
	CanLongPoll bool

	// the "http://host:port" and optional path (but without trailing slash) to have "/camli/*" appended
	URLBase string

	//HandlerFinder FindHandlerByTyper
}
// BlobRemover is the interface for removing blobs from storage.
type BlobRemover interface {
	// RemoveBlobs removes 0 or more blobs. Removal of
	// non-existent items isn't an error. Returns failure if any
	// items existed but failed to be deleted.
	// ErrNotImplemented may be returned for storage types not implementing removal.
	// If RemoveBlobs returns an error, it's possible that either
	// none or only some of the blobs were deleted.
	RemoveBlobs(ctx context.Context, blobs []blob.Ref) error
}
// Storage is the interface that must be implemented by a blobserver
// storage type. (e.g. localdisk, s3, encrypt, shard, replica, remote)
type Storage interface {
	blob.Fetcher
	BlobReceiver
	BlobStatter
	BlobEnumerator
	BlobRemover
}
// FetcherEnumerator is the combination of fetching blobs and
// enumerating them.
type FetcherEnumerator interface {
	blob.Fetcher
	BlobEnumerator
}
// StorageHandler is a storage implementation that also exports an HTTP
// status page.
type StorageHandler interface {
	Storage
	http.Handler
}
// ShutdownStorage is an optional interface for storage
// implementations which can be asked to shut down
// cleanly. Regardless, all implementations should be able to survive
// crashes without data loss.
type ShutdownStorage interface {
	Storage
	io.Closer
}
// WholeRefFetcher is an optional fast-path interface exposed by the
// 'blobpacked' blob storage implementation, which packs pieces of
// files together and can efficiently serve them contiguously.
type WholeRefFetcher interface {
	// OpenWholeRef returns a ReadCloser reading from offset bytes
	// into wholeRef (the blobref of an entire file).
	//
	// The returned wholeSize is the size of the file, without
	// subtracting any offset.
	//
	// The err will be os.ErrNotExist if the wholeref is not
	// known.
	OpenWholeRef(wholeRef blob.Ref, offset int64) (rc io.ReadCloser, wholeSize int64, err error)
}
// A GenerationNotSupportedError explains why a Storage
// value implemented the Generationer interface but failed due
// to a wrapped Storage value not implementing the interface.
type GenerationNotSupportedError string

// Error returns the explanation string, satisfying the error interface.
func (s GenerationNotSupportedError) Error() string {
	return string(s)
}
/*
Generationer is an optional interface and an optimization and paranoia
facility for clients which can be implemented by Storage
implementations.

If the client sees the same random string in multiple upload sessions,
it assumes that the blobserver still has all the same blobs, and also
it's the same server. This mechanism is not fundamental to
Perkeep's operation: the client could also check each blob before
uploading, or enumerate all blobs from the server too. This is purely
an optimization so clients can mix this value into their "is this file
uploaded?" local cache keys.
*/
type Generationer interface {
	// StorageGeneration returns a Storage's initialization time and
	// unique random string (or UUID). Implementations
	// should call ResetStorageGeneration on demand if no
	// information is known.
	// The error will be of type GenerationNotSupportedError if an underlying
	// storage target doesn't support the Generationer interface.
	StorageGeneration() (initTime time.Time, random string, err error)

	// ResetStorageGeneration deletes the information returned by
	// StorageGeneration and re-generates it.
	ResetStorageGeneration() error
}
// Configer reports a handler's configuration.
type Configer interface {
	Config() *Config
}
// StorageConfiger is a Storage that can also report
// its configuration.
type StorageConfiger interface {
	Storage
	Configer
}
// MaxEnumerateConfig is an optional interface implemented by Storage
// interfaces to advertise their max value for how many items can
// be enumerated at once.
type MaxEnumerateConfig interface {
	Storage

	// MaxEnumerate returns the max that this storage interface is
	// capable of enumerating at once.
	MaxEnumerate() int
}