diff --git a/README.md b/README.md index 89815eae77..73fbf73802 100644 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ cost of consistency guarantees. These caching behaviors can be controlled with the flags `--stat-cache-ttl` and `--type-cache-ttl`. See [semantics.md](docs/semantics.md#caching) for more information. -## Downloading file contents +## Downloading object contents Behind the scenes, when a newly-opened file is first modified, gcsfuse downloads the entire backing object's contents from GCS. The contents are stored in a @@ -92,9 +92,9 @@ local temporary file whose location is controlled by the flag `--temp-dir`. Later, when the file is closed or fsync'd, gcsfuse writes the contents of the local file back to GCS as a new object generation. -Files that are not modified are read chunk by chunk on demand. Such non-dirty -content is cached in the temporary directory, with a size limit defined by -`--temp-dir-bytes`. The chunk size is controlled by `--gcs-chunk-size`. +Files that are read but not modified are read portion by portion on demand. +gcsfuse uses a heuristic to detect when a file is being read sequentially, and +will issue fewer, larger read requests to GCS in this case. 
The consequence of this is that gcsfuse is relatively efficient when reading or writing entire large files, but will not be particularly fast for small numbers diff --git a/flags.go b/flags.go index a360ec94c9..f234fca248 100644 --- a/flags.go +++ b/flags.go @@ -143,12 +143,6 @@ func newApp() (app *cli.App) { "inodes.", }, - cli.IntFlag{ - Name: "gcs-chunk-size", - Value: 1 << 24, - Usage: "Max chunk size for loading GCS objects.", - }, - cli.StringFlag{ Name: "temp-dir", Value: "", @@ -156,12 +150,6 @@ func newApp() (app *cli.App) { "(default: system default, likely /tmp)", }, - cli.IntFlag{ - Name: "temp-dir-bytes", - Value: 1 << 31, - Usage: "Size limit of the temporary directory.", - }, - ///////////////////////// // Debugging ///////////////////////// @@ -208,9 +196,7 @@ type flagStorage struct { // Tuning StatCacheTTL time.Duration TypeCacheTTL time.Duration - GCSChunkSize uint64 TempDir string - TempDirLimit int64 // Debugging DebugFuse bool @@ -238,9 +224,7 @@ func populateFlags(c *cli.Context) (flags *flagStorage) { // Tuning, StatCacheTTL: c.Duration("stat-cache-ttl"), TypeCacheTTL: c.Duration("type-cache-ttl"), - GCSChunkSize: uint64(c.Int("gcs-chunk-size")), TempDir: c.String("temp-dir"), - TempDirLimit: int64(c.Int("temp-dir-bytes")), ImplicitDirs: c.Bool("implicit-dirs"), // Debugging, diff --git a/flags_test.go b/flags_test.go index 83fd71b802..0f7e519097 100644 --- a/flags_test.go +++ b/flags_test.go @@ -78,9 +78,7 @@ func (t *FlagsTest) Defaults() { // Tuning ExpectEq(time.Minute, f.StatCacheTTL) ExpectEq(time.Minute, f.TypeCacheTTL) - ExpectEq(1<<24, f.GCSChunkSize) ExpectEq("", f.TempDir) - ExpectEq(1<<31, f.TempDirLimit) // Debugging ExpectFalse(f.DebugFuse) @@ -149,8 +147,6 @@ func (t *FlagsTest) Numbers() { "--gid=19", "--limit-bytes-per-sec=123.4", "--limit-ops-per-sec=56.78", - "--gcs-chunk-size=1000", - "--temp-dir-bytes=2000", } f := parseArgs(args) @@ -160,8 +156,6 @@ func (t *FlagsTest) Numbers() { ExpectEq(19, f.Gid) ExpectEq(123.4, 
f.EgressBandwidthLimitBytesPerSecond) ExpectEq(56.78, f.OpRateLimitHz) - ExpectEq(1000, f.GCSChunkSize) - ExpectEq(2000, f.TempDirLimit) } func (t *FlagsTest) Strings() { diff --git a/fs/foreign_modifications_test.go b/fs/foreign_modifications_test.go index d5984403ff..9ae4301ed1 100644 --- a/fs/foreign_modifications_test.go +++ b/fs/foreign_modifications_test.go @@ -520,6 +520,8 @@ func (t *ForeignModsTest) ReadFromFile_Small() { } func (t *ForeignModsTest) ReadFromFile_Large() { + randSrc := rand.New(rand.NewSource(0xdeadbeef)) + // Create some random contents. const contentLen = 1 << 22 contents := randBytes(contentLen) @@ -547,8 +549,8 @@ func (t *ForeignModsTest) ReadFromFile_Large() { defer func() { AssertEq(nil, f.Close()) }() // Read part of it. - offset := rand.Int63n(contentLen + 1) - size := rand.Intn(int(contentLen - offset)) + offset := randSrc.Int63n(contentLen + 1) + size := randSrc.Intn(int(contentLen - offset)) n, err := f.ReadAt(buf[:size], offset) if offset+int64(size) == contentLen && err == io.EOF { @@ -560,9 +562,11 @@ func (t *ForeignModsTest) ReadFromFile_Large() { AssertTrue( bytes.Equal(contents[offset:offset+int64(size)], buf[:n]), "offset: %d\n"+ - "size:%d\n", + "size: %d\n"+ + "n: %d", offset, - size) + size, + n) } start := time.Now() diff --git a/fs/fs.go b/fs/fs.go index 50ea0312ac..2f5a10912a 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -17,15 +17,15 @@ package fs import ( "errors" "fmt" + "io" "log" - "math" "os" "reflect" "time" "github.com/googlecloudplatform/gcsfuse/fs/inode" - "github.com/googlecloudplatform/gcsfuse/gcsproxy" - "github.com/googlecloudplatform/gcsfuse/lease" + "github.com/googlecloudplatform/gcsfuse/internal/fs/handle" + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/fuse/fuseutil" @@ -33,7 +33,6 @@ import ( "github.com/jacobsa/syncutil" "github.com/jacobsa/timeutil" "golang.org/x/net/context" - "golang.org/x/sys/unix" ) type 
ServerConfig struct { @@ -47,25 +46,6 @@ type ServerConfig struct { // use the system default. TempDir string - // A desired limit on the number of open files used for storing temporary - // object contents. May not be obeyed if there is a large number of dirtied - // files that have not been flushed or closed. - // - // Most users will want to use ChooseTempDirLimitNumFiles to choose this. - TempDirLimitNumFiles int - - // A desired limit on temporary space usage, in bytes. May not be obeyed if - // there is a large volume of dirtied files that have not been flushed or - // closed. - TempDirLimitBytes int64 - - // If set to a non-zero value N, the file system will read objects from GCS a - // chunk at a time with a maximum read size of N, caching each chunk - // independently. The part about separate caching does not apply to dirty - // files, for which the entire contents will be in the temporary directory - // regardless of this setting. - GCSChunkSize uint64 - // By default, if a bucket contains the object "foo/bar" but no object named // "foo/", it's as if the directory doesn't exist. This allows us to have // non-flaky name resolution code. @@ -128,26 +108,13 @@ func NewServer(cfg *ServerConfig) (server fuse.Server, err error) { return } - // Disable chunking if set to zero. - gcsChunkSize := cfg.GCSChunkSize - if gcsChunkSize == 0 { - gcsChunkSize = math.MaxUint64 - } - - // Create the file leaser. - leaser := lease.NewFileLeaser( - cfg.TempDir, - cfg.TempDirLimitNumFiles, - cfg.TempDirLimitBytes) - // Create the object syncer. - // Check TmpObjectPrefix. 
if cfg.TmpObjectPrefix == "" { err = errors.New("You must set TmpObjectPrefix.") return } - objectSyncer := gcsproxy.NewObjectSyncer( + syncer := gcsx.NewSyncer( cfg.AppendThreshold, cfg.TmpObjectPrefix, cfg.Bucket) @@ -156,9 +123,8 @@ func NewServer(cfg *ServerConfig) (server fuse.Server, err error) { fs := &fileSystem{ clock: cfg.Clock, bucket: cfg.Bucket, - leaser: leaser, - objectSyncer: objectSyncer, - gcsChunkSize: gcsChunkSize, + syncer: syncer, + tempDir: cfg.TempDir, implicitDirs: cfg.ImplicitDirectories, dirTypeCacheTTL: cfg.DirTypeCacheTTL, uid: cfg.Uid, @@ -204,37 +170,6 @@ func NewServer(cfg *ServerConfig) (server fuse.Server, err error) { return } -// Choose a reasonable value for ServerConfig.TempDirLimitNumFiles based on -// process limits. -func ChooseTempDirLimitNumFiles() (limit int) { - // Ask what the process's limit on open files is. Use a default on error. - var rlimit unix.Rlimit - err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit) - if err != nil { - const defaultLimit = 512 - log.Println( - "Warning: failed to query RLIMIT_NOFILE. Using default "+ - "file count limit of %d", - defaultLimit) - - limit = defaultLimit - return - } - - // Heuristic: Use about 75% of the limit. - limit64 := rlimit.Cur/2 + rlimit.Cur/4 - - // But not too large. - const reasonableLimit = 1 << 15 - if limit64 > reasonableLimit { - limit64 = reasonableLimit - } - - limit = int(limit64) - - return -} - //////////////////////////////////////////////////////////////////////// // fileSystem type //////////////////////////////////////////////////////////////////////// @@ -244,20 +179,19 @@ func ChooseTempDirLimitNumFiles() (limit int) { // Let FS be the file system lock. Define a strict partial order < as follows: // // 1. For any inode lock I, I < FS. -// 2. For any directory handle lock DH and inode lock I, DH < I. +// 2. For any handle lock H and inode lock I, H < I. // // We follow the rule "acquire A then B only if A < B". 
// // In other words: // -// * Don't hold multiple directory handle locks at the same time. +// * Don't hold multiple handle locks at the same time. // * Don't hold multiple inode locks at the same time. -// * Don't acquire inode locks before directory handle locks. +// * Don't acquire inode locks before handle locks. // * Don't acquire file system locks before either. // -// The intuition is that we hold inode and directory handle locks for -// long-running operations, and we don't want to block the entire file system -// on those. +// The intuition is that we hold inode and handle locks for long-running +// operations, and we don't want to block the entire file system on those. // // See http://goo.gl/rDxxlG for more discussion, including an informal proof // that a strict partial order is sufficient. @@ -269,16 +203,15 @@ type fileSystem struct { // Dependencies ///////////////////////// - clock timeutil.Clock - bucket gcs.Bucket - objectSyncer gcsproxy.ObjectSyncer - leaser lease.FileLeaser + clock timeutil.Clock + bucket gcs.Bucket + syncer gcsx.Syncer ///////////////////////// // Constant data ///////////////////////// - gcsChunkSize uint64 + tempDir string implicitDirs bool dirTypeCacheTTL time.Duration @@ -367,7 +300,7 @@ type fileSystem struct { // The collection of live handles, keyed by handle ID. 
// - // INVARIANT: All values are of type *dirHandle + // INVARIANT: All values are of type *dirHandle or *handle.FileHandle // // GUARDED_BY(mu) handles map[fuseops.HandleID]interface{} @@ -515,9 +448,14 @@ func (fs *fileSystem) checkInvariants() { // handles ////////////////////////////////// - // INVARIANT: All values are of type *dirHandle + // INVARIANT: All values are of type *dirHandle or *handle.FileHandle for _, h := range fs.handles { - _ = h.(*dirHandle) + switch h.(type) { + case *dirHandle: + case *handle.FileHandle: + default: + panic(fmt.Sprintf("Unexpected handle type: %T", h)) + } } ////////////////////////////////// @@ -592,10 +530,9 @@ func (fs *fileSystem) mintInode(name string, o *gcs.Object) (in inode.Inode) { Gid: fs.gid, Mode: fs.fileMode, }, - fs.gcsChunkSize, fs.bucket, - fs.leaser, - fs.objectSyncer, + fs.syncer, + fs.tempDir, fs.clock) } @@ -1087,6 +1024,19 @@ func (fs *fileSystem) CreateFile( defer fs.unlockAndMaybeDisposeOfInode(child, &err) + // Allocate a handle. + fs.mu.Lock() + + handleID := fs.nextHandleID + fs.nextHandleID++ + + fs.handles[handleID] = handle.NewFileHandle( + child.(*inode.FileInode), + fs.bucket) + op.Handle = handleID + + fs.mu.Unlock() + // Fill out the response. op.Entry.Child = child.ID() op.Entry.Attributes, err = child.Attributes(ctx) @@ -1381,8 +1331,15 @@ func (fs *fileSystem) OpenFile( fs.mu.Lock() defer fs.mu.Unlock() - // Sanity check that this inode exists and is of the correct type. - _ = fs.inodes[op.Inode].(*inode.FileInode) + // Find the inode. + in := fs.inodes[op.Inode].(*inode.FileInode) + + // Allocate a handle. + handleID := fs.nextHandleID + fs.nextHandleID++ + + fs.handles[handleID] = handle.NewFileHandle(in, fs.bucket) + op.Handle = handleID // When we observe object generations that we didn't create, we assign them // new inode IDs. 
So for a given inode, all modifications go through the @@ -1397,16 +1354,21 @@ func (fs *fileSystem) OpenFile( func (fs *fileSystem) ReadFile( ctx context.Context, op *fuseops.ReadFileOp) (err error) { - // Find the inode. + // Find the handle and lock it. fs.mu.Lock() - in := fs.inodes[op.Inode].(*inode.FileInode) + fh := fs.handles[op.Handle].(*handle.FileHandle) fs.mu.Unlock() - in.Lock() - defer in.Unlock() + fh.Lock() + defer fh.Unlock() - // Serve the request. - op.BytesRead, err = in.Read(ctx, op.Dst, op.Offset) + // Serve the read. + op.BytesRead, err = fh.Read(ctx, op.Dst, op.Offset) + + // As required by fuse, we don't treat EOF as an error. + if err == io.EOF { + err = nil + } return } @@ -1487,7 +1449,14 @@ func (fs *fileSystem) FlushFile( func (fs *fileSystem) ReleaseFileHandle( ctx context.Context, op *fuseops.ReleaseFileHandleOp) (err error) { - // We implement this only to keep it from appearing in the log of fuse - // errors. There's nothing we need to actually do. + fs.mu.Lock() + defer fs.mu.Unlock() + + // Destroy the handle. + fs.handles[op.Handle].(*handle.FileHandle).Destroy() + + // Update the map. + delete(fs.handles, op.Handle) + return } diff --git a/fs/fs_test.go b/fs/fs_test.go index aab2e8081c..9905f58813 100644 --- a/fs/fs_test.go +++ b/fs/fs_test.go @@ -15,6 +15,7 @@ package fs_test import ( + "flag" "fmt" "io" "io/ioutil" @@ -46,6 +47,8 @@ const ( func TestFS(t *testing.T) { RunTests(t) } +var fDebug = flag.Bool("debug", false, "Print debugging output.") + // Install a SIGINT handler that exits gracefully once the current test is // finished. It's not safe to exit in the middle of a test because closing any // open files may require the fuse daemon to still be responsive. @@ -112,10 +115,6 @@ func (t *fsTest) SetUp(ti *TestInfo) { t.serverCfg.FilePerms = filePerms t.serverCfg.DirPerms = dirPerms - // Use some temporary space to speed tests. 
- t.serverCfg.TempDirLimitNumFiles = 16 - t.serverCfg.TempDirLimitBytes = 1 << 22 // 4 MiB - // Set up the append optimization. t.serverCfg.AppendThreshold = 0 t.serverCfg.TmpObjectPrefix = ".gcsfuse_tmp/" @@ -132,6 +131,10 @@ func (t *fsTest) SetUp(ti *TestInfo) { mountCfg := t.mountCfg mountCfg.OpContext = t.ctx + if *fDebug { + mountCfg.DebugLogger = log.New(os.Stderr, "fuse: ", 0) + } + t.mfs, err = fuse.Mount(t.Dir, server, &mountCfg) AssertEq(nil, err) } diff --git a/fs/inode/file.go b/fs/inode/file.go index 132a3a3e2f..920488a8b2 100644 --- a/fs/inode/file.go +++ b/fs/inode/file.go @@ -18,9 +18,7 @@ import ( "fmt" "io" - "github.com/googlecloudplatform/gcsfuse/gcsproxy" - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/mutable" + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/syncutil" @@ -33,19 +31,18 @@ type FileInode struct { // Dependencies ///////////////////////// - bucket gcs.Bucket - leaser lease.FileLeaser - objectSyncer gcsproxy.ObjectSyncer - clock timeutil.Clock + bucket gcs.Bucket + syncer gcsx.Syncer + clock timeutil.Clock ///////////////////////// // Constant data ///////////////////////// - id fuseops.InodeID - name string - attrs fuseops.InodeAttributes - gcsChunkSize uint64 + id fuseops.InodeID + name string + attrs fuseops.InodeAttributes + tempDir string ///////////////////////// // Mutable state @@ -65,12 +62,9 @@ type FileInode struct { // GUARDED_BY(mu) src gcs.Object - // The current content of this inode, branched from the source object. - // - // INVARIANT: content.CheckInvariants() does not panic - // - // GUARDED_BY(mu) - content mutable.Content + // The current content of this inode, or nil if the source object is still + // authoritative. + content gcsx.TempFile // Has Destroy been called? // @@ -83,9 +77,6 @@ var _ Inode = &FileInode{} // Create a file inode for the given object in GCS. 
The initial lookup count is // zero. // -// gcsChunkSize controls the maximum size of each individual read request made -// to GCS. -// // REQUIRES: o != nil // REQUIRES: o.Generation > 0 // REQUIRES: len(o.Name) > 0 @@ -94,30 +85,20 @@ func NewFileInode( id fuseops.InodeID, o *gcs.Object, attrs fuseops.InodeAttributes, - gcsChunkSize uint64, bucket gcs.Bucket, - leaser lease.FileLeaser, - objectSyncer gcsproxy.ObjectSyncer, + syncer gcsx.Syncer, + tempDir string, clock timeutil.Clock) (f *FileInode) { // Set up the basic struct. f = &FileInode{ - bucket: bucket, - leaser: leaser, - objectSyncer: objectSyncer, - clock: clock, - id: id, - name: o.Name, - attrs: attrs, - gcsChunkSize: gcsChunkSize, - src: *o, - content: mutable.NewContent( - gcsproxy.NewReadProxy( - o, - nil, // Initial read lease - gcsChunkSize, - leaser, - bucket), - clock), + bucket: bucket, + syncer: syncer, + clock: clock, + id: id, + name: o.Name, + attrs: attrs, + tempDir: tempDir, + src: *o, } f.lc.Init(id) @@ -150,7 +131,9 @@ func (f *FileInode) checkInvariants() { } // INVARIANT: content.CheckInvariants() does not panic - f.content.CheckInvariants() + if f.content != nil { + f.content.CheckInvariants() + } } // LOCKS_REQUIRED(f.mu) @@ -178,6 +161,43 @@ func (f *FileInode) clobbered(ctx context.Context) (b bool, err error) { return } +// Ensure that f.content != nil +// +// LOCKS_REQUIRED(f.mu) +func (f *FileInode) ensureContent(ctx context.Context) (err error) { + // Is there anything to do? + if f.content != nil { + return + } + + // Open a reader for the generation we care about. + rc, err := f.bucket.NewReader( + ctx, + &gcs.ReadObjectRequest{ + Name: f.src.Name, + Generation: f.src.Generation, + }) + + if err != nil { + err = fmt.Errorf("NewReader: %v", err) + return + } + + defer rc.Close() + + // Create a temporary file with its contents. + tf, err := gcsx.NewTempFile(rc, f.tempDir, f.clock) + if err != nil { + err = fmt.Errorf("NewTempFile: %v", err) + return + } + + // Update state. 
+ f.content = tf + + return +} + //////////////////////////////////////////////////////////////////////// // Public interface //////////////////////////////////////////////////////////////////////// @@ -198,7 +218,28 @@ func (f *FileInode) Name() string { return f.name } -// Return the object generation number from which this inode was branched. +// Return a record for the GCS object generation from which this inode is +// branched. The record is guaranteed not to be modified, and users must not +// modify it. +// +// LOCKS_REQUIRED(f.mu) +func (f *FileInode) Source() *gcs.Object { + // Make a copy, since we modify f.src. + o := f.src + return &o +} + +// If true, it is safe to serve reads directly from the object generation given +// by f.Source(), rather than calling f.ReadAt. Doing so may be more efficient, +// because f.ReadAt may cause the entire object to be faulted in and requires +// the inode to be locked during the read. +// +// LOCKS_REQUIRED(f.mu) +func (f *FileInode) SourceGenerationIsAuthoritative() bool { + return f.content == nil +} + +// Equivalent to f.Source().Generation. // // LOCKS_REQUIRED(f) func (f *FileInode) SourceGeneration() int64 { @@ -220,28 +261,36 @@ func (f *FileInode) DecrementLookupCount(n uint64) (destroy bool) { func (f *FileInode) Destroy() (err error) { f.destroyed = true - f.content.Destroy() + if f.content != nil { + f.content.Destroy() + } + return } // LOCKS_REQUIRED(f.mu) func (f *FileInode) Attributes( ctx context.Context) (attrs fuseops.InodeAttributes, err error) { - // Stat the content. - sr, err := f.content.Stat(ctx) - if err != nil { - err = fmt.Errorf("Stat: %v", err) - return - } - - // Fill out the struct. attrs = f.attrs - attrs.Size = uint64(sr.Size) - if sr.Mtime != nil { - attrs.Mtime = *sr.Mtime - } else { - attrs.Mtime = f.src.Updated + // Obtain default information from the source object. 
+ attrs.Mtime = f.src.Updated + attrs.Size = uint64(f.src.Size) + + // If GCS is no longer authoritative, stat our local content to obtain size + // and mtime. + if f.content != nil { + var sr gcsx.StatResult + sr, err = f.content.Stat() + if err != nil { + err = fmt.Errorf("Stat: %v", err) + return + } + + attrs.Size = uint64(sr.Size) + if sr.Mtime != nil { + attrs.Mtime = *sr.Mtime + } } // If the object has been clobbered, we reflect that as the inode being @@ -259,21 +308,31 @@ func (f *FileInode) Attributes( return } -// Serve a read for this file with semantics matching fuseops.ReadFileOp. +// Serve a read for this file with semantics matching io.ReaderAt. +// +// The caller may be better off reading directly from GCS when +// f.SourceGenerationIsAuthoritative() is true. // // LOCKS_REQUIRED(f.mu) func (f *FileInode) Read( ctx context.Context, dst []byte, offset int64) (n int, err error) { - // Read from the mutable content. - n, err = f.content.ReadAt(ctx, dst, offset) + // Make sure f.content != nil. + err = f.ensureContent(ctx) + if err != nil { + err = fmt.Errorf("ensureContent: %v", err) + return + } - // We don't return errors for EOF. Otherwise, propagate errors. - if err == io.EOF { - err = nil - } else if err != nil { - err = fmt.Errorf("ReadAt: %v", err) + // Read from the local content, propagating io.EOF. + n, err = f.content.ReadAt(dst, offset) + switch { + case err == io.EOF: + return + + case err != nil: + err = fmt.Errorf("content.ReadAt: %v", err) return } @@ -287,9 +346,16 @@ func (f *FileInode) Write( ctx context.Context, data []byte, offset int64) (err error) { - // Write to the mutable content. Note that the mutable content guarantees - // that it returns an error for short writes. - _, err = f.content.WriteAt(ctx, data, offset) + // Make sure f.content != nil. + err = f.ensureContent(ctx) + if err != nil { + err = fmt.Errorf("ensureContent: %v", err) + return + } + + // Write to the mutable content. 
Note that io.WriterAt guarantees it returns + // an error for short writes. + _, err = f.content.WriteAt(data, offset) return } @@ -304,11 +370,13 @@ func (f *FileInode) Write( // // LOCKS_REQUIRED(f.mu) func (f *FileInode) Sync(ctx context.Context) (err error) { + // If we have not been dirtied, there is nothing to do. + if f.content == nil { + return + } + // Write out the contents if they are dirty. - rl, newObj, err := f.objectSyncer.SyncObject( - ctx, - &f.src, - f.content) + newObj, err := f.syncer.SyncObject(ctx, &f.src, f.content) // Special case: a precondition error means we were clobbered, which we treat // as being unlinked. There's no reason to return an error in that case. @@ -318,21 +386,14 @@ func (f *FileInode) Sync(ctx context.Context) (err error) { // Propagate other errors. if err != nil { - err = fmt.Errorf("gcsproxy.Sync: %v", err) + err = fmt.Errorf("SyncObject: %v", err) return } // If we wrote out a new object, we need to update our state. if newObj != nil { f.src = *newObj - f.content = mutable.NewContent( - gcsproxy.NewReadProxy( - newObj, - rl, - f.gcsChunkSize, - f.leaser, - f.bucket), - f.clock) + f.content = nil } return @@ -344,6 +405,15 @@ func (f *FileInode) Sync(ctx context.Context) (err error) { func (f *FileInode) Truncate( ctx context.Context, size int64) (err error) { - err = f.content.Truncate(ctx, size) + // Make sure f.content != nil. + err = f.ensureContent(ctx) + if err != nil { + err = fmt.Errorf("ensureContent: %v", err) + return + } + + // Call through. 
+ err = f.content.Truncate(size) + return } diff --git a/fs/inode/file_test.go b/fs/inode/file_test.go index 93ef33d8c2..311785fb95 100644 --- a/fs/inode/file_test.go +++ b/fs/inode/file_test.go @@ -16,7 +16,7 @@ package inode_test import ( "fmt" - "math" + "io" "os" "testing" "time" @@ -24,8 +24,7 @@ import ( "golang.org/x/net/context" "github.com/googlecloudplatform/gcsfuse/fs/inode" - "github.com/googlecloudplatform/gcsfuse/gcsproxy" - "github.com/googlecloudplatform/gcsfuse/lease" + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" "github.com/jacobsa/fuse/fuseops" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/gcloud/gcs/gcsfake" @@ -50,7 +49,6 @@ const fileMode os.FileMode = 0641 type FileTest struct { ctx context.Context bucket gcs.Bucket - leaser lease.FileLeaser clock timeutil.SimulatedClock initialContents string @@ -67,7 +65,6 @@ func init() { RegisterTestSuite(&FileTest{}) } func (t *FileTest) SetUp(ti *TestInfo) { t.ctx = ti.Ctx t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local)) - t.leaser = lease.NewFileLeaser("", math.MaxInt32, math.MaxInt64) t.bucket = gcsfake.NewFakeBucket(&t.clock, "some_bucket") // Set up the backing object. @@ -91,13 +88,12 @@ func (t *FileTest) SetUp(ti *TestInfo) { Gid: gid, Mode: fileMode, }, - math.MaxUint64, // GCS chunk size t.bucket, - t.leaser, - gcsproxy.NewObjectSyncer( + gcsx.NewSyncer( 1, // Append threshold ".gcsfuse_tmp/", t.bucket), + "", &t.clock) t.in.Lock() @@ -138,8 +134,7 @@ func (t *FileTest) InitialAttributes() { func (t *FileTest) Read() { AssertEq("taco", t.initialContents) - // Make several reads, checking the expected contents. We should never get an - // EOF error, since fuseops.ReadFileOp is not supposed to see those. + // Make several reads, checking the expected contents. testCases := []struct { offset int64 size int @@ -176,6 +171,11 @@ func (t *FileTest) Read() { n, err := t.in.Read(t.ctx, data, tc.offset) data = data[:n] + // Ignore EOF. 
+ if err == io.EOF { + err = nil + } + AssertEq(nil, err, "%s", desc) ExpectEq(tc.expected, string(data), "%s", desc) } @@ -202,6 +202,11 @@ func (t *FileTest) Write() { // Read back the content. var buf [1024]byte n, err := t.in.Read(t.ctx, buf[:], 0) + + if err == io.EOF { + err = nil + } + AssertEq(nil, err) ExpectEq("pacoburrito", string(buf[:n])) @@ -231,6 +236,11 @@ func (t *FileTest) Truncate() { // Read the contents. var buf [1024]byte n, err := t.in.Read(t.ctx, buf[:], 0) + + if err == io.EOF { + err = nil + } + AssertEq(nil, err) ExpectEq("ta", string(buf[:n])) diff --git a/fs/stress_test.go b/fs/stress_test.go index cb2f0f8cf3..fe6d742e72 100644 --- a/fs/stress_test.go +++ b/fs/stress_test.go @@ -72,10 +72,9 @@ func (t *StressTest) CreateAndReadManyFilesInParallel() { // Ensure that we get parallelism for this test. defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU())) - // Exercise lease revocation logic. - numFiles := 2 * t.serverCfg.TempDirLimitNumFiles - // Choose a bunch of file names. + const numFiles = 32 + var names []string for i := 0; i < numFiles; i++ { names = append(names, fmt.Sprintf("%d", i)) diff --git a/gcsproxy/read_proxy.go b/gcsproxy/read_proxy.go deleted file mode 100644 index 10618932c5..0000000000 --- a/gcsproxy/read_proxy.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gcsproxy - -import ( - "fmt" - "io" - - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/jacobsa/gcloud/gcs" - "golang.org/x/net/context" -) - -//////////////////////////////////////////////////////////////////////// -// Public interface -//////////////////////////////////////////////////////////////////////// - -// Create a view on the given GCS object generation. If rl is non-nil, it must -// contain a lease for the contents of the object and will be used when -// possible instead of re-reading the object. -// -// If the object is larger than the given chunk size, we will only read -// and cache portions of it at a time. -func NewReadProxy( - o *gcs.Object, - rl lease.ReadLease, - chunkSize uint64, - leaser lease.FileLeaser, - bucket gcs.Bucket) (rp lease.ReadProxy) { - // Sanity check: the read lease's size should match the object's size if it - // is present. - if rl != nil && uint64(rl.Size()) != o.Size { - panic(fmt.Sprintf( - "Read lease size %d doesn't match object size %d", - rl.Size(), - o.Size)) - } - - // Special case: don't bring in the complication of a multi-read proxy if we - // have only one refresher. - refreshers := makeRefreshers(chunkSize, o, bucket) - if len(refreshers) == 1 { - rp = lease.NewReadProxy(leaser, refreshers[0], rl) - } else { - rp = lease.NewMultiReadProxy(leaser, refreshers, rl) - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func makeRefreshers( - chunkSize uint64, - o *gcs.Object, - bucket gcs.Bucket) (refreshers []lease.Refresher) { - // Iterate over each chunk of the object. - for startOff := uint64(0); startOff < o.Size; startOff += chunkSize { - r := gcs.ByteRange{startOff, startOff + chunkSize} - - // Clip the range so that objectRefresher can report the correct size. 
- if r.Limit > o.Size { - r.Limit = o.Size - } - - refresher := &objectRefresher{ - O: o, - Bucket: bucket, - Range: &r, - } - - refreshers = append(refreshers, refresher) - } - - return -} - -// A refresher that returns the contents of a particular generation of a GCS -// object. Optionally, only a particular range is returned. -type objectRefresher struct { - Bucket gcs.Bucket - O *gcs.Object - Range *gcs.ByteRange -} - -func (r *objectRefresher) Size() (size int64) { - if r.Range != nil { - size = int64(r.Range.Limit - r.Range.Start) - return - } - - size = int64(r.O.Size) - return -} - -func (r *objectRefresher) Refresh( - ctx context.Context) (rc io.ReadCloser, err error) { - req := &gcs.ReadObjectRequest{ - Name: r.O.Name, - Generation: r.O.Generation, - Range: r.Range, - } - - rc, err = r.Bucket.NewReader(ctx, req) - if err != nil { - err = fmt.Errorf("NewReader: %v", err) - return - } - - return -} diff --git a/internal/fs/handle/file.go b/internal/fs/handle/file.go new file mode 100644 index 0000000000..a83df511d8 --- /dev/null +++ b/internal/fs/handle/file.go @@ -0,0 +1,173 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package handle + +import ( + "fmt" + "io" + + "github.com/googlecloudplatform/gcsfuse/fs/inode" + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" + "github.com/jacobsa/gcloud/gcs" + "github.com/jacobsa/syncutil" + "golang.org/x/net/context" +) + +type FileHandle struct { + inode *inode.FileInode + bucket gcs.Bucket + + mu syncutil.InvariantMutex + + // A random reader configured to some (potentially previous) generation of + // the object backing the inode, or nil. + // + // INVARIANT: If reader != nil, reader.CheckInvariants() doesn't panic. + // + // GUARDED_BY(mu) + reader gcsx.RandomReader +} + +func NewFileHandle( + inode *inode.FileInode, + bucket gcs.Bucket) (fh *FileHandle) { + fh = &FileHandle{ + inode: inode, + bucket: bucket, + } + + fh.mu = syncutil.NewInvariantMutex(fh.checkInvariants) + + return +} + +// Destroy any resources associated with the handle, which must not be used +// again. +func (fh *FileHandle) Destroy() { + if fh.reader != nil { + fh.reader.Destroy() + } +} + +// Return the inode backing this handle. +func (fh *FileHandle) Inode() *inode.FileInode { + return fh.inode +} + +func (fh *FileHandle) Lock() { + fh.mu.Lock() +} + +func (fh *FileHandle) Unlock() { + fh.mu.Unlock() +} + +// Equivalent to locking fh.Inode() and calling fh.Inode().Read, but may be +// more efficient. +// +// LOCKS_REQUIRED(fh) +// LOCKS_EXCLUDED(fh.inode) +func (fh *FileHandle) Read( + ctx context.Context, + dst []byte, + offset int64) (n int, err error) { + // Lock the inode and attempt to ensure that we have a reader for its current + // state, or clear fh.reader if it's not possible to create one (probably + // because the inode is dirty). + fh.inode.Lock() + err = fh.tryEnsureReader() + if err != nil { + fh.inode.Unlock() + err = fmt.Errorf("tryEnsureReader: %v", err) + return + } + + // If we have an appropriate reader, unlock the inode and use that. 
This + // allows reads to proceed concurrently with other operations; in particular, + // multiple reads can run concurrently. It's safe because the user can't tell + // if a concurrent write started during or after a read. + if fh.reader != nil { + fh.inode.Unlock() + + n, err = fh.reader.ReadAt(ctx, dst, offset) + switch { + case err == io.EOF: + return + + case err != nil: + err = fmt.Errorf("fh.reader.ReadAt: %v", err) + return + } + + return + } + + // Otherwise we must fall through to the inode. + defer fh.inode.Unlock() + n, err = fh.inode.Read(ctx, dst, offset) + + return +} + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +// LOCKS_REQUIRED(fh.mu) +func (fh *FileHandle) checkInvariants() { + // INVARIANT: If reader != nil, reader.CheckInvariants() doesn't panic. + if fh.reader != nil { + fh.reader.CheckInvariants() + } +} + +// If possible, ensure that fh.reader is set to an appropriate random reader +// for the current state of the inode. Otherwise set it to nil. +// +// LOCKS_REQUIRED(fh) +// LOCKS_REQUIRED(fh.inode) +func (fh *FileHandle) tryEnsureReader() (err error) { + // If the inode is dirty, there's nothing we can do. Throw away our reader if + // we have one. + if !fh.inode.SourceGenerationIsAuthoritative() { + if fh.reader != nil { + fh.reader.Destroy() + fh.reader = nil + } + + return + } + + // If we already have a reader, and it's at the appropriate generation, we + // can use it. Otherwise we must throw it away. + if fh.reader != nil { + if fh.reader.Object().Generation == fh.inode.SourceGeneration() { + return + } + + fh.reader.Destroy() + fh.reader = nil + } + + // Attempt to create an appropriate reader. 
+ rr, err := gcsx.NewRandomReader(fh.inode.Source(), fh.bucket) + if err != nil { + err = fmt.Errorf("NewRandomReader: %v", err) + return + } + + fh.reader = rr + return +} diff --git a/gcsproxy/append_object_creator.go b/internal/gcsx/append_object_creator.go similarity index 99% rename from gcsproxy/append_object_creator.go rename to internal/gcsx/append_object_creator.go index e2dcfff5a6..17937994e5 100644 --- a/gcsproxy/append_object_creator.go +++ b/internal/gcsx/append_object_creator.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package gcsproxy +package gcsx import ( "crypto/rand" diff --git a/gcsproxy/append_object_creator_test.go b/internal/gcsx/append_object_creator_test.go similarity index 99% rename from gcsproxy/append_object_creator_test.go rename to internal/gcsx/append_object_creator_test.go index b8f705c573..b78b2bc52a 100644 --- a/gcsproxy/append_object_creator_test.go +++ b/internal/gcsx/append_object_creator_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package gcsproxy +package gcsx import ( "errors" diff --git a/gcsproxy/integration_test.go b/internal/gcsx/integration_test.go similarity index 60% rename from gcsproxy/integration_test.go rename to internal/gcsx/integration_test.go index de82694a2f..838d6bfbc3 100644 --- a/gcsproxy/integration_test.go +++ b/internal/gcsx/integration_test.go @@ -12,13 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package gcsproxy_test +package gcsx_test import ( "bytes" "fmt" "io" - "io/ioutil" "math" "math/rand" "testing" @@ -26,9 +25,7 @@ import ( "golang.org/x/net/context" - "github.com/googlecloudplatform/gcsfuse/gcsproxy" - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/mutable" + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/gcloud/gcs/gcsfake" "github.com/jacobsa/gcloud/gcs/gcsutil" @@ -65,18 +62,13 @@ func randBytes(n int) (b []byte) { // Boilerplate //////////////////////////////////////////////////////////////////////// -const chunkSize = 1<<18 + 3 -const fileLeaserLimitNumFiles = math.MaxInt32 -const fileLeaserLimitBytes = 1 << 21 - type IntegrationTest struct { ctx context.Context bucket gcs.Bucket - leaser lease.FileLeaser clock timeutil.SimulatedClock - syncer gcsproxy.ObjectSyncer + syncer gcsx.Syncer - mc mutable.Content + tf gcsx.TempFile } var _ SetUpInterface = &IntegrationTest{} @@ -87,41 +79,44 @@ func init() { RegisterTestSuite(&IntegrationTest{}) } func (t *IntegrationTest) SetUp(ti *TestInfo) { t.ctx = ti.Ctx t.bucket = gcsfake.NewFakeBucket(&t.clock, "some_bucket") - t.leaser = lease.NewFileLeaser( - "", - fileLeaserLimitNumFiles, - fileLeaserLimitBytes) // Set up a fixed, non-zero time. t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local)) - // Set up the object syncer. + // Set up the syncer. const appendThreshold = 0 const tmpObjectPrefix = ".gcsfuse_tmp/" - t.syncer = gcsproxy.NewObjectSyncer( + t.syncer = gcsx.NewSyncer( appendThreshold, tmpObjectPrefix, t.bucket) } func (t *IntegrationTest) TearDown() { - if t.mc != nil { - t.mc.Destroy() + if t.tf != nil { + t.tf.Destroy() } } func (t *IntegrationTest) create(o *gcs.Object) { - // Set up the read proxy. - rp := gcsproxy.NewReadProxy( - o, - nil, - chunkSize, - t.leaser, - t.bucket) + // Set up a reader. 
+ rc, err := t.bucket.NewReader( + t.ctx, + &gcs.ReadObjectRequest{ + Name: o.Name, + Generation: o.Generation, + }) + + AssertEq(nil, err) + + // Use it to create the temp file. + t.tf, err = gcsx.NewTempFile(rc, "", &t.clock) + AssertEq(nil, err) - // Use it to create the mutable content. - t.mc = mutable.NewContent(rp, &t.clock) + // Close it. + err = rc.Close() + AssertEq(nil, err) } // Return the object generation, or -1 if non-existent. Panic on error. @@ -148,11 +143,10 @@ func (t *IntegrationTest) objectGeneration(name string) (gen int64) { return } -func (t *IntegrationTest) sync(src *gcs.Object) ( - rl lease.ReadLease, o *gcs.Object, err error) { - rl, o, err = t.syncer.SyncObject(t.ctx, src, t.mc) - if err == nil && rl != nil { - t.mc = nil +func (t *IntegrationTest) sync(src *gcs.Object) (o *gcs.Object, err error) { + o, err = t.syncer.SyncObject(t.ctx, src, t.tf) + if err == nil && o != nil { + t.tf = nil } return @@ -171,17 +165,16 @@ func (t *IntegrationTest) ReadThenSync() { // Read the contents. buf := make([]byte, 1024) - n, err := t.mc.ReadAt(t.ctx, buf, 0) + n, err := t.tf.ReadAt(buf, 0) AssertThat(err, AnyOf(io.EOF, nil)) ExpectEq(len("taco"), n) ExpectEq("taco", string(buf[:n])) // Sync doesn't need to do anything. - rl, newObj, err := t.sync(o) + newObj, err := t.sync(o) AssertEq(nil, err) - ExpectEq(nil, rl) ExpectEq(nil, newObj) } @@ -193,13 +186,13 @@ func (t *IntegrationTest) WriteThenSync() { t.create(o) // Overwrite the first byte. - n, err := t.mc.WriteAt(t.ctx, []byte("p"), 0) + n, err := t.tf.WriteAt([]byte("p"), 0) AssertEq(nil, err) ExpectEq(1, n) // Sync should save out the new generation. - rl, newObj, err := t.sync(o) + newObj, err := t.sync(o) AssertEq(nil, err) ExpectNe(o.Generation, newObj.Generation) @@ -210,14 +203,6 @@ func (t *IntegrationTest) WriteThenSync() { AssertEq(nil, err) ExpectEq("paco", string(contents)) - // Read via the lease. 
- _, err = rl.Seek(0, 0) - AssertEq(nil, err) - - contents, err = ioutil.ReadAll(rl) - AssertEq(nil, err) - ExpectEq("paco", string(contents)) - // There should be no junk left over in the bucket besides the object of // interest. objects, runs, err := gcsutil.ListAll( @@ -240,13 +225,13 @@ func (t *IntegrationTest) AppendThenSync() { t.create(o) // Append some data. - n, err := t.mc.WriteAt(t.ctx, []byte("burrito"), 4) + n, err := t.tf.WriteAt([]byte("burrito"), 4) AssertEq(nil, err) ExpectEq(len("burrito"), n) // Sync should save out the new generation. - rl, newObj, err := t.sync(o) + newObj, err := t.sync(o) AssertEq(nil, err) ExpectNe(o.Generation, newObj.Generation) @@ -257,14 +242,6 @@ func (t *IntegrationTest) AppendThenSync() { AssertEq(nil, err) ExpectEq("tacoburrito", string(contents)) - // Read via the lease. - _, err = rl.Seek(0, 0) - AssertEq(nil, err) - - contents, err = ioutil.ReadAll(rl) - AssertEq(nil, err) - ExpectEq("tacoburrito", string(contents)) - // There should be no junk left over in the bucket besides the object of // interest. objects, runs, err := gcsutil.ListAll( @@ -287,11 +264,11 @@ func (t *IntegrationTest) TruncateThenSync() { t.create(o) // Truncate. - err = t.mc.Truncate(t.ctx, 2) + err = t.tf.Truncate(2) AssertEq(nil, err) // Sync should save out the new generation. - rl, newObj, err := t.sync(o) + newObj, err := t.sync(o) AssertEq(nil, err) ExpectNe(o.Generation, newObj.Generation) @@ -300,14 +277,6 @@ func (t *IntegrationTest) TruncateThenSync() { contents, err := gcsutil.ReadObject(t.ctx, t.bucket, "foo") AssertEq(nil, err) ExpectEq("ta", string(contents)) - - // Read via the lease. - _, err = rl.Seek(0, 0) - AssertEq(nil, err) - - contents, err = ioutil.ReadAll(rl) - AssertEq(nil, err) - ExpectEq("ta", string(contents)) } func (t *IntegrationTest) Stat_InitialState() { @@ -318,7 +287,7 @@ func (t *IntegrationTest) Stat_InitialState() { t.create(o) // Stat. 
- sr, err := t.mc.Stat(t.ctx) + sr, err := t.tf.Stat() AssertEq(nil, err) ExpectEq(o.Size, sr.Size) @@ -337,13 +306,13 @@ func (t *IntegrationTest) Stat_Dirty() { t.clock.AdvanceTime(time.Second) truncateTime := t.clock.Now() - err = t.mc.Truncate(t.ctx, 2) + err = t.tf.Truncate(2) AssertEq(nil, err) t.clock.AdvanceTime(time.Second) // Stat. - sr, err := t.mc.Stat(t.ctx) + sr, err := t.tf.Stat() AssertEq(nil, err) ExpectEq(2, sr.Size) @@ -351,103 +320,7 @@ func (t *IntegrationTest) Stat_Dirty() { ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(truncateTime))) } -func (t *IntegrationTest) WithinLeaserLimit() { - AssertLt(len("taco"), fileLeaserLimitBytes) - - // Create. - o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) - AssertEq(nil, err) - - t.create(o) - - // Extend to be up against the leaser limit, then write out to GCS, which - // should downgrade to a read lease. - err = t.mc.Truncate(t.ctx, fileLeaserLimitBytes) - AssertEq(nil, err) - - rl, _, err := t.sync(o) - AssertEq(nil, err) - - // The backing object should be present and contain the correct contents. - contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name) - AssertEq(nil, err) - ExpectEq(fileLeaserLimitBytes, len(contents)) - - // Delete the backing object. - err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name}) - AssertEq(nil, err) - - // We should still be able to read the contents, because the read lease - // should still be valid. - buf := make([]byte, 4) - n, err := rl.ReadAt(buf, 0) - - AssertEq(nil, err) - ExpectEq("taco", string(buf[0:n])) -} - -func (t *IntegrationTest) LargerThanLeaserLimit() { - AssertLt(len("taco"), fileLeaserLimitBytes) - - // Create. - o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) - AssertEq(nil, err) - - t.create(o) - - // Extend to be past the leaser limit, then write out to GCS, which should - // downgrade to a read lease. 
- err = t.mc.Truncate(t.ctx, fileLeaserLimitBytes+1) - AssertEq(nil, err) - - rl, _, err := t.sync(o) - AssertEq(nil, err) - - // The backing object should be present and contain the correct contents. - contents, err := gcsutil.ReadObject(t.ctx, t.bucket, o.Name) - AssertEq(nil, err) - ExpectEq(fileLeaserLimitBytes+1, len(contents)) - - // Delete the backing object. - err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name}) - AssertEq(nil, err) - - // The contents should be lost, because the leaser should have revoked the - // read lease. - _, err = rl.ReadAt(make([]byte, len(contents)), 0) - ExpectThat(err, Error(HasSubstr("revoked"))) -} - -func (t *IntegrationTest) BackingObjectHasBeenDeleted_BeforeReading() { - // Create an object to obtain a record, then delete it. - o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) - AssertEq(nil, err) - - err = t.bucket.DeleteObject(t.ctx, &gcs.DeleteObjectRequest{Name: o.Name}) - AssertEq(nil, err) - - // Create a mutable object around it. - t.create(o) - - // Sync doesn't need to do anything. - rl, newObj, err := t.sync(o) - - AssertEq(nil, err) - ExpectEq(nil, rl) - ExpectEq(nil, newObj) - - // Anything that needs to fault in the contents should fail. - _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) - ExpectThat(err, Error(HasSubstr("not found"))) - - err = t.mc.Truncate(t.ctx, 10) - ExpectThat(err, Error(HasSubstr("not found"))) - - _, err = t.mc.WriteAt(t.ctx, []byte{}, 0) - ExpectThat(err, Error(HasSubstr("not found"))) -} - -func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() { +func (t *IntegrationTest) BackingObjectHasBeenDeleted() { // Create. o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) AssertEq(nil, err) @@ -455,7 +328,7 @@ func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() { t.create(o) // Fault in the contents. 
- _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) + _, err = t.tf.ReadAt([]byte{}, 0) AssertEq(nil, err) // Delete the backing object. @@ -463,19 +336,19 @@ func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() { AssertEq(nil, err) // Reading and modications should still work. - _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) + _, err = t.tf.ReadAt([]byte{}, 0) AssertEq(nil, err) - _, err = t.mc.WriteAt(t.ctx, []byte("a"), 0) + _, err = t.tf.WriteAt([]byte("a"), 0) AssertEq(nil, err) truncateTime := t.clock.Now() - err = t.mc.Truncate(t.ctx, 1) + err = t.tf.Truncate(1) AssertEq(nil, err) t.clock.AdvanceTime(time.Second) // Stat should see the current state. - sr, err := t.mc.Stat(t.ctx) + sr, err := t.tf.Stat() AssertEq(nil, err) ExpectEq(1, sr.Size) @@ -483,7 +356,7 @@ func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() { ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(truncateTime))) // Sync should fail with a precondition error. - _, _, err = t.sync(o) + _, err = t.sync(o) ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{})) // Nothing should have been created. @@ -491,36 +364,7 @@ func (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() { ExpectThat(err, HasSameTypeAs(&gcs.NotFoundError{})) } -func (t *IntegrationTest) BackingObjectHasBeenOverwritten_BeforeReading() { - // Create an object, then create the mutable object wrapper around it. - o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) - AssertEq(nil, err) - - t.create(o) - - // Overwrite the GCS object. - _, err = gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("burrito")) - AssertEq(nil, err) - - // Sync doesn't need to do anything. - rl, newObj, err := t.sync(o) - - AssertEq(nil, err) - ExpectEq(nil, rl) - ExpectEq(nil, newObj) - - // Anything that needs to fault in the contents should fail. 
- _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) - ExpectThat(err, Error(HasSubstr("not found"))) - - err = t.mc.Truncate(t.ctx, 10) - ExpectThat(err, Error(HasSubstr("not found"))) - - _, err = t.mc.WriteAt(t.ctx, []byte{}, 0) - ExpectThat(err, Error(HasSubstr("not found"))) -} - -func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() { +func (t *IntegrationTest) BackingObjectHasBeenOverwritten() { // Create. o, err := gcsutil.CreateObject(t.ctx, t.bucket, "foo", []byte("taco")) AssertEq(nil, err) @@ -528,7 +372,7 @@ func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() { t.create(o) // Fault in the contents. - _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) + _, err = t.tf.ReadAt([]byte{}, 0) AssertEq(nil, err) // Overwrite the backing object. @@ -536,19 +380,19 @@ func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() { AssertEq(nil, err) // Reading and modications should still work. - _, err = t.mc.ReadAt(t.ctx, []byte{}, 0) + _, err = t.tf.ReadAt([]byte{}, 0) AssertEq(nil, err) - _, err = t.mc.WriteAt(t.ctx, []byte("a"), 0) + _, err = t.tf.WriteAt([]byte("a"), 0) AssertEq(nil, err) truncateTime := t.clock.Now() - err = t.mc.Truncate(t.ctx, 3) + err = t.tf.Truncate(3) AssertEq(nil, err) t.clock.AdvanceTime(time.Second) // Stat should see the current state. - sr, err := t.mc.Stat(t.ctx) + sr, err := t.tf.Stat() AssertEq(nil, err) ExpectEq(3, sr.Size) @@ -556,7 +400,7 @@ func (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() { ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(truncateTime))) // Sync should fail with a precondition error. - _, _, err = t.sync(o) + _, err = t.sync(o) ExpectThat(err, HasSameTypeAs(&gcs.PreconditionError{})) // The newer version should still be present. 
@@ -571,18 +415,9 @@ func (t *IntegrationTest) MultipleInteractions() { sizes := []int{ 0, 1, - chunkSize - 1, - chunkSize, - chunkSize + 1, - 3*chunkSize - 1, - 3 * chunkSize, - 3*chunkSize + 1, - fileLeaserLimitBytes - 1, - fileLeaserLimitBytes, - fileLeaserLimitBytes + 1, - ((fileLeaserLimitBytes / chunkSize) - 1) * chunkSize, - (fileLeaserLimitBytes / chunkSize) * chunkSize, - ((fileLeaserLimitBytes / chunkSize) + 1) * chunkSize, + 1 << 19, + 1 << 20, + 1 << 21, } // Generate random contents for the maximum size. @@ -614,11 +449,11 @@ func (t *IntegrationTest) MultipleInteractions() { AssertEq(nil, err) - // Create a mutable object around it. + // Create a temp file around it. t.create(o) - // Read the contents of the mutable object. - _, err = t.mc.ReadAt(t.ctx, buf, 0) + // Read the contents of the temp file. + _, err = t.tf.ReadAt(buf, 0) AssertThat(err, AnyOf(nil, io.EOF)) if !bytes.Equal(buf, expectedContents) { @@ -632,18 +467,18 @@ func (t *IntegrationTest) MultipleInteractions() { expectedContents[size/2] = 19 expectedContents[size-1] = 23 - _, err = t.mc.WriteAt(t.ctx, []byte{17}, 0) + _, err = t.tf.WriteAt([]byte{17}, 0) AssertEq(nil, err) - _, err = t.mc.WriteAt(t.ctx, []byte{19}, int64(size/2)) + _, err = t.tf.WriteAt([]byte{19}, int64(size/2)) AssertEq(nil, err) - _, err = t.mc.WriteAt(t.ctx, []byte{23}, int64(size-1)) + _, err = t.tf.WriteAt([]byte{23}, int64(size-1)) AssertEq(nil, err) } // Compare contents again. - _, err = t.mc.ReadAt(t.ctx, buf, 0) + _, err = t.tf.ReadAt(buf, 0) AssertThat(err, AnyOf(nil, io.EOF)) if !bytes.Equal(buf, expectedContents) { @@ -652,7 +487,7 @@ func (t *IntegrationTest) MultipleInteractions() { } // Sync and recreate if necessary. - _, newObj, err := t.sync(o) + newObj, err := t.sync(o) AssertEq(nil, err) if newObj != nil { @@ -668,7 +503,7 @@ func (t *IntegrationTest) MultipleInteractions() { } // Compare contents again. 
- _, err = t.mc.ReadAt(t.ctx, buf, 0) + _, err = t.tf.ReadAt(buf, 0) AssertThat(err, AnyOf(nil, io.EOF)) if !bytes.Equal(buf, expectedContents) { @@ -680,12 +515,12 @@ func (t *IntegrationTest) MultipleInteractions() { if size > 0 { expectedContents[0] = 29 - _, err = t.mc.WriteAt(t.ctx, []byte{29}, 0) + _, err = t.tf.WriteAt([]byte{29}, 0) AssertEq(nil, err) } // Compare contents again. - _, err = t.mc.ReadAt(t.ctx, buf, 0) + _, err = t.tf.ReadAt(buf, 0) AssertThat(err, AnyOf(nil, io.EOF)) if !bytes.Equal(buf, expectedContents) { diff --git a/internal/gcsx/random_reader.go b/internal/gcsx/random_reader.go new file mode 100644 index 0000000000..d9b8d008e3 --- /dev/null +++ b/internal/gcsx/random_reader.go @@ -0,0 +1,293 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcsx + +import ( + "fmt" + "io" + "math" + + "github.com/jacobsa/gcloud/gcs" + "golang.org/x/net/context" +) + +// We will not send a request to GCS for less than this many bytes (unless the +// end of the object comes first). +const minReadSize = 1 << 20 + +// An object that knows how to read ranges within a particular generation of a +// particular GCS object. May make optimizations when it e.g. detects large +// sequential reads. +// +// Not safe for concurrent access. +type RandomReader interface { + // Panic if any internal invariants are violated. 
+ CheckInvariants() + + // Matches the semantics of io.ReaderAt, with the addition of context + // support. + ReadAt(ctx context.Context, p []byte, offset int64) (n int, err error) + + // Return the record for the object to which the reader is bound. + Object() (o *gcs.Object) + + // Clean up any resources associated with the reader, which must not be used + // again. + Destroy() +} + +// Create a random reader for the supplied object record that reads using the +// given bucket. +func NewRandomReader( + o *gcs.Object, + bucket gcs.Bucket) (rr RandomReader, err error) { + rr = &randomReader{ + object: o, + bucket: bucket, + start: -1, + limit: -1, + } + + return +} + +type randomReader struct { + object *gcs.Object + bucket gcs.Bucket + + // If non-nil, an in-flight read request and a function for cancelling it. + // + // INVARIANT: (reader == nil) == (cancel == nil) + reader io.ReadCloser + cancel func() + + // The range of the object that we expect reader to yield, when reader is + // non-nil. When reader is nil, limit is the limit of the previous read + // operation, or -1 if there has never been one. + // + // INVARIANT: start <= limit + // INVARIANT: limit < 0 implies reader == nil + start int64 + limit int64 +} + +func (rr *randomReader) CheckInvariants() { + // INVARIANT: (reader == nil) == (cancel == nil) + if (rr.reader == nil) != (rr.cancel == nil) { + panic(fmt.Sprintf("Mismatch: %v vs. %v", rr.reader, rr.cancel)) + } + + // INVARIANT: start <= limit + if !(rr.start <= rr.limit) { + panic(fmt.Sprintf("Unexpected range: [%d, %d)", rr.start, rr.limit)) + } + + // INVARIANT: limit < 0 implies reader == nil + if rr.limit < 0 && rr.reader != nil { + panic(fmt.Sprintf("Unexpected non-nil reader with limit == %d", rr.limit)) + } +} + +func (rr *randomReader) ReadAt( + ctx context.Context, + p []byte, + offset int64) (n int, err error) { + for len(p) > 0 { + // Have we blown past the end of the object?
+ if offset >= int64(rr.object.Size) { + err = io.EOF + return + } + + // If we have an existing reader but it's positioned at the wrong place, + // clean it up and throw it away. + if rr.reader != nil && rr.start != offset { + rr.reader.Close() + rr.reader = nil + rr.cancel = nil + } + + // If we don't have a reader, start a read operation. + if rr.reader == nil { + err = rr.startRead(offset, int64(len(p))) + if err != nil { + err = fmt.Errorf("startRead: %v", err) + return + } + } + + // Now we have a reader positioned at the correct place. Consume as much from + // it as possible. + var tmp int + tmp, err = rr.readFull(ctx, p) + + n += tmp + p = p[tmp:] + rr.start += int64(tmp) + offset += int64(tmp) + + // Sanity check. + if rr.start > rr.limit { + err = fmt.Errorf("Reader returned %d too many bytes", rr.start-rr.limit) + + // Don't attempt to reuse the reader when it's behaving wackily. + rr.reader.Close() + rr.reader = nil + rr.cancel = nil + rr.start = -1 + rr.limit = -1 + + return + } + + // Are we finished with this reader now? + if rr.start == rr.limit { + rr.reader.Close() + rr.reader = nil + rr.cancel = nil + } + + // Handle errors. + switch { + case err == io.EOF || err == io.ErrUnexpectedEOF: + // For a non-empty buffer, ReadFull returns EOF or ErrUnexpectedEOF only + // if the reader peters out early. That's fine, but it means we should + // have hit the limit above. + if rr.reader != nil { + err = fmt.Errorf("Reader returned %d too few bytes", rr.limit-rr.start) + return + } + + err = nil + + case err != nil: + // Propagate other errors. + err = fmt.Errorf("readFull: %v", err) + return + } + } + + return +} + +func (rr *randomReader) Object() (o *gcs.Object) { + o = rr.object + return +} + +func (rr *randomReader) Destroy() { + // Close out the reader, if we have one. + if rr.reader != nil { + rr.reader.Close() + rr.reader = nil + rr.cancel = nil + } +} + +// Like io.ReadFull, but deals with the cancellation issues. 
+// +// REQUIRES: rr.reader != nil +func (rr *randomReader) readFull( + ctx context.Context, + p []byte) (n int, err error) { + // Start a goroutine that will cancel the read operation we block on below if + // the calling context is cancelled, but only if this method has not already + // returned (to avoid souring the reader for the next read if this one is + // successful, since the calling context will eventually be cancelled). + readDone := make(chan struct{}) + defer close(readDone) + + go func() { + select { + case <-readDone: + return + + case <-ctx.Done(): + select { + case <-readDone: + return + + default: + rr.cancel() + } + } + }() + + // Call through. + n, err = io.ReadFull(rr.reader, p) + + return +} + +// Ensure that rr.reader is set up for a range for which [start, start+size) is +// a prefix. +func (rr *randomReader) startRead( + start int64, + size int64) (err error) { + // Make sure start and size are legal. + if start < 0 || uint64(start) > rr.object.Size || size < 0 { + err = fmt.Errorf( + "Range [%d, %d) is illegal for %d-byte object", + start, + start+size, + rr.object.Size) + return + } + + // We always read a decent amount from GCS, no matter how silly small the + // user's read is, because GCS requests are expensive. + actualSize := int64(size) + if actualSize < minReadSize { + actualSize = minReadSize + } + + // If this read starts where the previous one left off, we take this as a + // sign that the user is reading sequentially within the object. It's + // probably worth it to just request the entire rest of the object, and let + // them sip from the fire hose with each call to ReadAt. + if start == rr.limit { + actualSize = math.MaxInt64 + } + + // Clip to the end of the object. + if actualSize > int64(rr.object.Size)-start { + actualSize = int64(rr.object.Size) - start + } + + // Begin the read.
+ ctx, cancel := context.WithCancel(context.Background()) + rc, err := rr.bucket.NewReader( + ctx, + &gcs.ReadObjectRequest{ + Name: rr.object.Name, + Generation: rr.object.Generation, + Range: &gcs.ByteRange{ + Start: uint64(start), + Limit: uint64(start + actualSize), + }, + }) + + if err != nil { + err = fmt.Errorf("NewReader: %v", err) + return + } + + rr.reader = rc + rr.cancel = cancel + rr.start = start + rr.limit = start + actualSize + + return +} diff --git a/internal/gcsx/random_reader_test.go b/internal/gcsx/random_reader_test.go new file mode 100644 index 0000000000..7a6536fd88 --- /dev/null +++ b/internal/gcsx/random_reader_test.go @@ -0,0 +1,526 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcsx + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "testing/iotest" + "time" + + "github.com/jacobsa/gcloud/gcs" + "github.com/jacobsa/gcloud/gcs/mock_gcs" + . "github.com/jacobsa/oglematchers" + . "github.com/jacobsa/oglemock" + . 
"github.com/jacobsa/ogletest" + "golang.org/x/net/context" +) + +func TestRandomReader(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Invariant-checking random reader +//////////////////////////////////////////////////////////////////////// + +type checkingRandomReader struct { + ctx context.Context + wrapped *randomReader +} + +func (rr *checkingRandomReader) ReadAt(p []byte, offset int64) (int, error) { + rr.wrapped.CheckInvariants() + defer rr.wrapped.CheckInvariants() + return rr.wrapped.ReadAt(rr.ctx, p, offset) +} + +func (rr *checkingRandomReader) Destroy() { + rr.wrapped.CheckInvariants() + rr.wrapped.Destroy() +} + +//////////////////////////////////////////////////////////////////////// +// Counting closer +//////////////////////////////////////////////////////////////////////// + +type countingCloser struct { + io.Reader + closeCount int +} + +func (cc *countingCloser) Close() (err error) { + cc.closeCount++ + return +} + +//////////////////////////////////////////////////////////////////////// +// Blocking reader +//////////////////////////////////////////////////////////////////////// + +// A reader that blocks until a channel is closed, then returns an error. 
+type blockingReader struct { + c chan struct{} +} + +func (br *blockingReader) Read(p []byte) (n int, err error) { + <-br.c + err = errors.New("blockingReader") + return +} + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +func rangeStartIs(expected uint64) (m Matcher) { + pred := func(c interface{}) (err error) { + req := c.(*gcs.ReadObjectRequest) + if req.Range == nil { + err = errors.New("which has a nil range") + return + } + + if req.Range.Start != expected { + err = fmt.Errorf("which has Start == %d", req.Range.Start) + return + } + + return + } + + m = NewMatcher(pred, fmt.Sprintf("has range start %d", expected)) + return +} + +func rangeLimitIs(expected uint64) (m Matcher) { + pred := func(c interface{}) (err error) { + req := c.(*gcs.ReadObjectRequest) + if req.Range == nil { + err = errors.New("which has a nil range") + return + } + + if req.Range.Limit != expected { + err = fmt.Errorf("which has Limit == %d", req.Range.Limit) + return + } + + return + } + + m = NewMatcher(pred, fmt.Sprintf("has range limit %d", expected)) + return +} + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type RandomReaderTest struct { + object *gcs.Object + bucket mock_gcs.MockBucket + rr checkingRandomReader +} + +func init() { RegisterTestSuite(&RandomReaderTest{}) } + +var _ SetUpInterface = &RandomReaderTest{} +var _ TearDownInterface = &RandomReaderTest{} + +func (t *RandomReaderTest) SetUp(ti *TestInfo) { + t.rr.ctx = ti.Ctx + + // Manufacture an object record. + t.object = &gcs.Object{ + Name: "foo", + Size: 17, + Generation: 1234, + } + + // Create the bucket. + t.bucket = mock_gcs.NewMockBucket(ti.MockController, "bucket") + + // Set up the reader. 
+ rr, err := NewRandomReader(t.object, t.bucket) + AssertEq(nil, err) + t.rr.wrapped = rr.(*randomReader) +} + +func (t *RandomReaderTest) TearDown() { + t.rr.Destroy() +} + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *RandomReaderTest) EmptyRead() { + // Nothing should happen. + buf := make([]byte, 0) + + n, err := t.rr.ReadAt(buf, 0) + ExpectEq(0, n) + ExpectEq(nil, err) +} + +func (t *RandomReaderTest) ReadAtEndOfObject() { + buf := make([]byte, 1) + + n, err := t.rr.ReadAt(buf, int64(t.object.Size)) + ExpectEq(0, n) + ExpectEq(io.EOF, err) +} + +func (t *RandomReaderTest) ReadPastEndOfObject() { + buf := make([]byte, 1) + + n, err := t.rr.ReadAt(buf, int64(t.object.Size)+1) + ExpectEq(0, n) + ExpectEq(io.EOF, err) +} + +func (t *RandomReaderTest) NoExistingReader() { + // The bucket should be called to set up a new reader. + ExpectCall(t.bucket, "NewReader")(Any(), Any()). + WillOnce(Return(nil, errors.New(""))) + + buf := make([]byte, 1) + t.rr.ReadAt(buf, 0) +} + +func (t *RandomReaderTest) ExistingReader_WrongOffset() { + // Simulate an existing reader. + t.rr.wrapped.reader = ioutil.NopCloser(strings.NewReader("xxx")) + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 2 + t.rr.wrapped.limit = 5 + + // The bucket should be called to set up a new reader. + ExpectCall(t.bucket, "NewReader")(Any(), Any()). + WillOnce(Return(nil, errors.New(""))) + + buf := make([]byte, 1) + t.rr.ReadAt(buf, 0) +} + +func (t *RandomReaderTest) NewReaderReturnsError() { + ExpectCall(t.bucket, "NewReader")(Any(), Any()). 
+ WillOnce(Return(nil, errors.New("taco"))) + + buf := make([]byte, 1) + _, err := t.rr.ReadAt(buf, 0) + + ExpectThat(err, Error(HasSubstr("NewReader"))) + ExpectThat(err, Error(HasSubstr("taco"))) +} + +func (t *RandomReaderTest) ReaderFails() { + // Bucket + r := iotest.OneByteReader(iotest.TimeoutReader(strings.NewReader("xxx"))) + rc := ioutil.NopCloser(r) + + ExpectCall(t.bucket, "NewReader")(Any(), Any()). + WillOnce(Return(rc, nil)) + + // Call + buf := make([]byte, 3) + _, err := t.rr.ReadAt(buf, 0) + + ExpectThat(err, Error(HasSubstr("readFull"))) + ExpectThat(err, Error(HasSubstr(iotest.ErrTimeout.Error()))) +} + +func (t *RandomReaderTest) ReaderOvershootsRange() { + // Simulate a reader that is supposed to return two more bytes, but actually + // returns three when asked to. + t.rr.wrapped.reader = ioutil.NopCloser(strings.NewReader("xxx")) + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 0 + t.rr.wrapped.limit = 2 + + // Try to read three bytes. + buf := make([]byte, 3) + _, err := t.rr.ReadAt(buf, 0) + + ExpectThat(err, Error(HasSubstr("1 too many bytes"))) +} + +func (t *RandomReaderTest) ReaderNotExhausted() { + // Set up a reader that has three bytes left to give. + rc := &countingCloser{ + Reader: strings.NewReader("abc"), + } + + t.rr.wrapped.reader = rc + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 4 + + // Read two bytes. + buf := make([]byte, 2) + n, err := t.rr.ReadAt(buf, 1) + + ExpectEq(2, n) + ExpectEq(nil, err) + ExpectEq("ab", string(buf[:n])) + + ExpectEq(0, rc.closeCount) + ExpectEq(rc, t.rr.wrapped.reader) + ExpectEq(3, t.rr.wrapped.start) + ExpectEq(4, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) ReaderExhausted_ReadFinished() { + // Set up a reader that has three bytes left to give. + rc := &countingCloser{ + Reader: strings.NewReader("abc"), + } + + t.rr.wrapped.reader = rc + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 4 + + // Read three bytes. 
+ buf := make([]byte, 3) + n, err := t.rr.ReadAt(buf, 1) + + ExpectEq(3, n) + ExpectEq(nil, err) + ExpectEq("abc", string(buf[:n])) + + ExpectEq(1, rc.closeCount) + ExpectEq(nil, t.rr.wrapped.reader) + ExpectEq(nil, t.rr.wrapped.cancel) + ExpectEq(4, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) ReaderExhausted_ReadNotFinished() { + // Set up a reader that has three bytes left to give. + rc := &countingCloser{ + Reader: strings.NewReader("abc"), + } + + t.rr.wrapped.reader = rc + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 4 + + // The bucket should be called at the previous limit to obtain a new reader. + ExpectCall(t.bucket, "NewReader")(Any(), rangeStartIs(4)). + WillOnce(Return(nil, errors.New(""))) + + // Attempt to read four bytes. + buf := make([]byte, 4) + n, _ := t.rr.ReadAt(buf, 1) + + AssertGe(n, 3) + ExpectEq("abc", string(buf[:3])) + + ExpectEq(1, rc.closeCount) + ExpectEq(nil, t.rr.wrapped.reader) + ExpectEq(nil, t.rr.wrapped.cancel) + ExpectEq(4, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) PropagatesCancellation() { + // Set up a reader that will block until we tell it to return. + finishRead := make(chan struct{}) + rc := ioutil.NopCloser(&blockingReader{finishRead}) + + t.rr.wrapped.reader = rc + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 4 + + // Snoop on when cancel is called. + cancelCalled := make(chan struct{}) + t.rr.wrapped.cancel = func() { close(cancelCalled) } + + // Start a read in the background using a context that we control. It should + // not yet return. + readReturned := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + buf := make([]byte, 2) + t.rr.wrapped.ReadAt(ctx, buf, 1) + close(readReturned) + }() + + select { + case <-time.After(10 * time.Millisecond): + case <-readReturned: + AddFailure("Read returned early.") + AbortTest() + } + + // When we cancel our context, the random reader should cancel the read + // context. 
+ cancel() + <-cancelCalled + + // Clean up. + close(finishRead) + <-readReturned +} + +func (t *RandomReaderTest) DoesntPropagateCancellationAfterReturning() { + // Set up a reader that will return three bytes. + t.rr.wrapped.reader = ioutil.NopCloser(strings.NewReader("xxx")) + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 4 + + // Snoop on when cancel is called. + cancelCalled := make(chan struct{}) + t.rr.wrapped.cancel = func() { close(cancelCalled) } + + // Successfully read two bytes using a context whose cancellation we control. + ctx, cancel := context.WithCancel(context.Background()) + buf := make([]byte, 2) + n, err := t.rr.wrapped.ReadAt(ctx, buf, 1) + + AssertEq(nil, err) + AssertEq(2, n) + + // If we cancel the calling context now, it should not cause the underlying + // read context to be cancelled. + cancel() + select { + case <-time.After(10 * time.Millisecond): + case <-cancelCalled: + AddFailure("Read context unexpectedly cancelled.") + AbortTest() + } +} + +func (t *RandomReaderTest) UpgradesReadsToMinimumSize() { + t.object.Size = 1 << 40 + + const readSize = 10 + AssertLt(readSize, minReadSize) + + // Simulate an existing reader at a mismatched offset. + t.rr.wrapped.reader = ioutil.NopCloser(strings.NewReader("xxx")) + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 2 + t.rr.wrapped.limit = 5 + + // The bucket should be asked to read minReadSize bytes, even though we only + // ask for a few bytes below. + r := strings.NewReader(strings.Repeat("x", minReadSize)) + rc := ioutil.NopCloser(r) + + ExpectCall(t.bucket, "NewReader")( + Any(), + AllOf(rangeStartIs(1), rangeLimitIs(1+minReadSize))). + WillOnce(Return(rc, nil)) + + // Call through. + buf := make([]byte, readSize) + t.rr.ReadAt(buf, 1) + + // Check the state now. 
+ ExpectEq(1+readSize, t.rr.wrapped.start) + ExpectEq(1+minReadSize, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) DoesntChangeReadsOfAppropriateSize() { + t.object.Size = 1 << 40 + const readSize = 2 * minReadSize + + // Simulate an existing reader at a mismatched offset. + t.rr.wrapped.reader = ioutil.NopCloser(strings.NewReader("xxx")) + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 2 + t.rr.wrapped.limit = 5 + + // The bucket should be asked to read readSize bytes. + r := strings.NewReader(strings.Repeat("x", readSize)) + rc := ioutil.NopCloser(r) + + ExpectCall(t.bucket, "NewReader")( + Any(), + AllOf(rangeStartIs(1), rangeLimitIs(1+readSize))). + WillOnce(Return(rc, nil)) + + // Call through. + buf := make([]byte, readSize) + t.rr.ReadAt(buf, 1) + + // Check the state now. + ExpectEq(1+readSize, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) UpgradesSequentialReads_ExistingReader() { + t.object.Size = 1 << 40 + const readSize = 10 + + // Simulate an existing reader at the correct offset, which will be exhausted + // by the read below. + const existingSize = 3 + r := strings.NewReader(strings.Repeat("x", existingSize)) + + t.rr.wrapped.reader = ioutil.NopCloser(r) + t.rr.wrapped.cancel = func() {} + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 1 + existingSize + + // The bucket should be asked to read up to the end of the object. + r = strings.NewReader(strings.Repeat("x", readSize-existingSize)) + rc := ioutil.NopCloser(r) + + ExpectCall(t.bucket, "NewReader")( + Any(), + AllOf(rangeStartIs(1+existingSize), rangeLimitIs(t.object.Size))). + WillOnce(Return(rc, nil)) + + // Call through. + buf := make([]byte, readSize) + t.rr.ReadAt(buf, 1) + + // Check the state now. 
+ ExpectEq(1+readSize, t.rr.wrapped.start) + ExpectEq(t.object.Size, t.rr.wrapped.limit) +} + +func (t *RandomReaderTest) UpgradesSequentialReads_NoExistingReader() { + t.object.Size = 1 << 40 + const readSize = 10 + + // Simulate a previous exhausted reader that ended at the offset from which + // we read below. + t.rr.wrapped.start = 1 + t.rr.wrapped.limit = 1 + + // The bucket should be asked to read up to the end of the object. + r := strings.NewReader(strings.Repeat("x", readSize)) + rc := ioutil.NopCloser(r) + + ExpectCall(t.bucket, "NewReader")( + Any(), + AllOf(rangeStartIs(1), rangeLimitIs(t.object.Size))). + WillOnce(Return(rc, nil)) + + // Call through. + buf := make([]byte, readSize) + t.rr.ReadAt(buf, 1) + + // Check the state now. + ExpectEq(1+readSize, t.rr.wrapped.start) + ExpectEq(t.object.Size, t.rr.wrapped.limit) +} diff --git a/gcsproxy/object_syncer.go b/internal/gcsx/syncer.go similarity index 68% rename from gcsproxy/object_syncer.go rename to internal/gcsx/syncer.go index 3d00a295cc..dc61bceca1 100644 --- a/gcsproxy/object_syncer.go +++ b/internal/gcsx/syncer.go @@ -12,39 +12,35 @@ // See the License for the specific language governing permissions and // limitations under the License. -package gcsproxy +package gcsx import ( "fmt" "io" - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/mutable" "github.com/jacobsa/gcloud/gcs" "golang.org/x/net/context" ) // Safe for concurrent access. -type ObjectSyncer interface { +type Syncer interface { // Given an object record and content that was originally derived from that // object's contents (and potentially modified): // - // * If the content has not been modified, return a nil read lease and a - // nil new object. + // * If the temp file has not been modified, return a nil new object. 
// // * Otherwise, write out a new generation in the bucket (failing with - // *gcs.PreconditionError if the source generation is no longer current) - // and return a read lease for that object's contents. + // *gcs.PreconditionError if the source generation is no longer current). // - // In the second case, the mutable.Content is destroyed. Otherwise, including - // when this function fails, it is guaranteed to still be valid. + // In the second case, the TempFile is destroyed. Otherwise, including when + // this function fails, it is guaranteed to still be valid. SyncObject( ctx context.Context, srcObject *gcs.Object, - content mutable.Content) (rl lease.ReadLease, o *gcs.Object, err error) + content TempFile) (o *gcs.Object, err error) } -// Create an object syncer that syncs into the supplied bucket. +// Create a syncer that syncs into the supplied bucket. // // When the source object has been changed only by appending, and the source // object's size is at least appendThreshold, we will "append" to it by writing @@ -53,10 +49,10 @@ type ObjectSyncer interface { // Temporary blobs have names beginning with tmpObjectPrefix. We make an effort // to delete them, but if we are interrupted for some reason we may not be able // to do so. Therefore the user should arrange for garbage collection. -func NewObjectSyncer( +func NewSyncer( appendThreshold int64, tmpObjectPrefix string, - bucket gcs.Bucket) (os ObjectSyncer) { + bucket gcs.Bucket) (os Syncer) { // Create the object creators. fullCreator := &fullObjectCreator{ bucket: bucket, @@ -66,8 +62,8 @@ func NewObjectSyncer( tmpObjectPrefix, bucket) - // And the object syncer. - os = newObjectSyncer(appendThreshold, fullCreator, appendCreator) + // And the syncer. 
+ os = newSyncer(appendThreshold, fullCreator, appendCreator) return } @@ -105,11 +101,10 @@ func (oc *fullObjectCreator) Create( } //////////////////////////////////////////////////////////////////////// -// objectSyncer +// syncer //////////////////////////////////////////////////////////////////////// -// An implementation detail of objectSyncer. See notes on -// newObjectSyncer. +// An implementation detail of syncer. See notes on newSyncer. type objectCreator interface { Create( ctx context.Context, @@ -117,8 +112,8 @@ type objectCreator interface { r io.Reader) (o *gcs.Object, err error) } -// Create an object syncer that stats the mutable content to see if it's dirty -// before calling through to one of two object creators if the content is dirty: +// Create a syncer that stats the mutable content to see if it's dirty before +// calling through to one of two object creators if the content is dirty: // // * fullCreator accepts the source object and the full contents with which it // should be overwritten. @@ -130,11 +125,11 @@ type objectCreator interface { // worthwhile to make the append optimization. It should be set to a value on // the order of the bandwidth to GCS times three times the round trip latency // to GCS (for a small create, a compose, and a delete). 
-func newObjectSyncer( +func newSyncer( appendThreshold int64, fullCreator objectCreator, - appendCreator objectCreator) (os ObjectSyncer) { - os = &objectSyncer{ + appendCreator objectCreator) (os Syncer) { + os = &syncer{ appendThreshold: appendThreshold, fullCreator: fullCreator, appendCreator: appendCreator, @@ -143,18 +138,18 @@ func newObjectSyncer( return } -type objectSyncer struct { +type syncer struct { appendThreshold int64 fullCreator objectCreator appendCreator objectCreator } -func (os *objectSyncer) SyncObject( +func (os *syncer) SyncObject( ctx context.Context, srcObject *gcs.Object, - content mutable.Content) (rl lease.ReadLease, o *gcs.Object, err error) { + content TempFile) (o *gcs.Object, err error) { // Stat the content. - sr, err := content.Stat(ctx) + sr, err := content.Stat() if err != nil { err = fmt.Errorf("Stat: %v", err) return @@ -184,22 +179,21 @@ func (os *objectSyncer) SyncObject( if srcSize >= os.appendThreshold && sr.DirtyThreshold == srcSize && srcObject.ComponentCount < gcs.MaxComponentCount { - o, err = os.appendCreator.Create( - ctx, - srcObject, - &mutableContentReader{ - Ctx: ctx, - Content: content, - Offset: srcSize, - }) + _, err = content.Seek(srcSize, 0) + if err != nil { + err = fmt.Errorf("Seek: %v", err) + return + } + + o, err = os.appendCreator.Create(ctx, srcObject, content) } else { - o, err = os.fullCreator.Create( - ctx, - srcObject, - &mutableContentReader{ - Ctx: ctx, - Content: content, - }) + _, err = content.Seek(0, 0) + if err != nil { + err = fmt.Errorf("Seek: %v", err) + return + } + + o, err = os.fullCreator.Create(ctx, srcObject, content) } // Deal with errors. @@ -213,26 +207,8 @@ func (os *objectSyncer) SyncObject( return } - // Yank out the contents. 
- rl = content.Release().Downgrade() - - return -} - -//////////////////////////////////////////////////////////////////////// -// mutableContentReader -//////////////////////////////////////////////////////////////////////// - -// An io.Reader that wraps a mutable.Content object, reading starting from a -// base offset. -type mutableContentReader struct { - Ctx context.Context - Content mutable.Content - Offset int64 -} + // Destroy the temp file. + content.Destroy() -func (mcr *mutableContentReader) Read(p []byte) (n int, err error) { - n, err = mcr.Content.ReadAt(mcr.Ctx, p, mcr.Offset) - mcr.Offset += int64(n) return } diff --git a/gcsproxy/object_syncer_test.go b/internal/gcsx/syncer_test.go similarity index 67% rename from gcsproxy/object_syncer_test.go rename to internal/gcsx/syncer_test.go index 7f1250c0c9..1a433ad070 100644 --- a/gcsproxy/object_syncer_test.go +++ b/internal/gcsx/syncer_test.go @@ -12,19 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package gcsproxy +package gcsx import ( "errors" "io" "io/ioutil" - "math" "strings" "testing" "time" - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/mutable" "github.com/jacobsa/gcloud/gcs" "github.com/jacobsa/gcloud/gcs/gcsfake" . 
"github.com/jacobsa/oglematchers" @@ -33,7 +30,7 @@ import ( "golang.org/x/net/context" ) -func TestObjectSyncer(t *testing.T) { RunTests(t) } +func TestSyncer(t *testing.T) { RunTests(t) } //////////////////////////////////////////////////////////////////////// // fakeObjectCreator @@ -78,33 +75,31 @@ func (oc *fakeObjectCreator) Create( const srcObjectContents = "taco" const appendThreshold = int64(len(srcObjectContents)) -type ObjectSyncerTest struct { +type SyncerTest struct { ctx context.Context fullCreator fakeObjectCreator appendCreator fakeObjectCreator bucket gcs.Bucket - leaser lease.FileLeaser - syncer ObjectSyncer + syncer Syncer clock timeutil.SimulatedClock srcObject *gcs.Object - content mutable.Content + content TempFile } -var _ SetUpInterface = &ObjectSyncerTest{} +var _ SetUpInterface = &SyncerTest{} -func init() { RegisterTestSuite(&ObjectSyncerTest{}) } +func init() { RegisterTestSuite(&SyncerTest{}) } -func (t *ObjectSyncerTest) SetUp(ti *TestInfo) { +func (t *SyncerTest) SetUp(ti *TestInfo) { var err error t.ctx = ti.Ctx // Set up dependencies. t.bucket = gcsfake.NewFakeBucket(&t.clock, "some_bucket") - t.leaser = lease.NewFileLeaser("", math.MaxInt32, math.MaxInt32) - t.syncer = newObjectSyncer( + t.syncer = newSyncer( appendThreshold, &t.fullCreator, &t.appendCreator) @@ -121,24 +116,21 @@ func (t *ObjectSyncerTest) SetUp(ti *TestInfo) { AssertEq(nil, err) - // Wrap a mutable.Content around it. - t.content = mutable.NewContent( - NewReadProxy( - t.srcObject, - nil, // Initial read lease - math.MaxUint64, // Chunk size - t.leaser, - t.bucket), + // Wrap a TempFile around it. + t.content, err = NewTempFile( + strings.NewReader(srcObjectContents), + "", &t.clock) + AssertEq(nil, err) + // Return errors from the fakes by default. 
t.fullCreator.err = errors.New("Fake error") t.appendCreator.err = errors.New("Fake error") } -func (t *ObjectSyncerTest) call() ( - rl lease.ReadLease, o *gcs.Object, err error) { - rl, o, err = t.syncer.SyncObject(t.ctx, t.srcObject, t.content) +func (t *SyncerTest) call() (o *gcs.Object, err error) { + o, err = t.syncer.SyncObject(t.ctx, t.srcObject, t.content) return } @@ -146,12 +138,11 @@ func (t *ObjectSyncerTest) call() ( // Tests //////////////////////////////////////////////////////////////////////// -func (t *ObjectSyncerTest) NotDirty() { +func (t *SyncerTest) NotDirty() { // Call - rl, o, err := t.call() + o, err := t.call() AssertEq(nil, err) - ExpectEq(nil, rl) ExpectEq(nil, o) // Neither creater should have been called. @@ -159,9 +150,9 @@ func (t *ObjectSyncerTest) NotDirty() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) SmallerThanSource() { +func (t *SyncerTest) SmallerThanSource() { // Truncate downward. - err := t.content.Truncate(t.ctx, int64(len(srcObjectContents)-1)) + err := t.content.Truncate(int64(len(srcObjectContents) - 1)) AssertEq(nil, err) // The full creator should be called. @@ -171,10 +162,9 @@ func (t *ObjectSyncerTest) SmallerThanSource() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) SameSizeAsSource() { +func (t *SyncerTest) SameSizeAsSource() { // Dirty a byte without changing the length. _, err := t.content.WriteAt( - t.ctx, []byte("a"), int64(len(srcObjectContents)-1)) @@ -187,16 +177,15 @@ func (t *ObjectSyncerTest) SameSizeAsSource() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) LargerThanSource_ThresholdInSource() { +func (t *SyncerTest) LargerThanSource_ThresholdInSource() { var err error // Extend the length of the content. - err = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+100)) + err = t.content.Truncate(int64(len(srcObjectContents) + 100)) AssertEq(nil, err) // But dirty a byte within the initial content. 
_, err = t.content.WriteAt( - t.ctx, []byte("a"), int64(len(srcObjectContents)-1)) @@ -209,17 +198,17 @@ func (t *ObjectSyncerTest) LargerThanSource_ThresholdInSource() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) SourceTooShortForAppend() { +func (t *SyncerTest) SourceTooShortForAppend() { var err error // Recreate the syncer with a higher append threshold. - t.syncer = newObjectSyncer( + t.syncer = newSyncer( int64(len(srcObjectContents)+1), &t.fullCreator, &t.appendCreator) // Extend the length of the content. - err = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+1)) + err = t.content.Truncate(int64(len(srcObjectContents) + 1)) AssertEq(nil, err) // The full creator should be called. @@ -229,14 +218,14 @@ func (t *ObjectSyncerTest) SourceTooShortForAppend() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) SourceComponentCountTooHigh() { +func (t *SyncerTest) SourceComponentCountTooHigh() { var err error // Simulate a large component count. t.srcObject.ComponentCount = gcs.MaxComponentCount // Extend the length of the content. - err = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+1)) + err = t.content.Truncate(int64(len(srcObjectContents) + 1)) AssertEq(nil, err) // The full creator should be called. @@ -246,11 +235,11 @@ func (t *ObjectSyncerTest) SourceComponentCountTooHigh() { ExpectFalse(t.appendCreator.called) } -func (t *ObjectSyncerTest) LargerThanSource_ThresholdAtEndOfSource() { +func (t *SyncerTest) LargerThanSource_ThresholdAtEndOfSource() { var err error // Extend the length of the content. - err = t.content.Truncate(t.ctx, int64(len(srcObjectContents)+1)) + err = t.content.Truncate(int64(len(srcObjectContents) + 1)) AssertEq(nil, err) // The append creator should be called. 
@@ -260,12 +249,12 @@ func (t *ObjectSyncerTest) LargerThanSource_ThresholdAtEndOfSource() { ExpectTrue(t.appendCreator.called) } -func (t *ObjectSyncerTest) CallsFullCreator() { +func (t *SyncerTest) CallsFullCreator() { var err error AssertLt(2, t.srcObject.Size) // Truncate downward. - err = t.content.Truncate(t.ctx, 2) + err = t.content.Truncate(2) AssertEq(nil, err) // Call @@ -276,64 +265,56 @@ func (t *ObjectSyncerTest) CallsFullCreator() { ExpectEq(srcObjectContents[:2], string(t.fullCreator.contents)) } -func (t *ObjectSyncerTest) FullCreatorFails() { +func (t *SyncerTest) FullCreatorFails() { var err error t.fullCreator.err = errors.New("taco") // Truncate downward. - err = t.content.Truncate(t.ctx, 2) + err = t.content.Truncate(2) AssertEq(nil, err) // Call - _, _, err = t.call() + _, err = t.call() ExpectThat(err, Error(HasSubstr("Create"))) ExpectThat(err, Error(HasSubstr("taco"))) } -func (t *ObjectSyncerTest) FullCreatorReturnsPreconditionError() { +func (t *SyncerTest) FullCreatorReturnsPreconditionError() { var err error t.fullCreator.err = &gcs.PreconditionError{} // Truncate downward. - err = t.content.Truncate(t.ctx, 2) + err = t.content.Truncate(2) AssertEq(nil, err) // Call - _, _, err = t.call() + _, err = t.call() ExpectEq(t.fullCreator.err, err) } -func (t *ObjectSyncerTest) FullCreatorSucceeds() { +func (t *SyncerTest) FullCreatorSucceeds() { var err error t.fullCreator.o = &gcs.Object{} t.fullCreator.err = nil // Truncate downward. - err = t.content.Truncate(t.ctx, 2) + err = t.content.Truncate(2) AssertEq(nil, err) // Call - rl, o, err := t.call() + o, err := t.call() AssertEq(nil, err) ExpectEq(t.fullCreator.o, o) - - // Check the read lease. - _, err = rl.Seek(0, 0) - AssertEq(nil, err) - - buf, err := ioutil.ReadAll(rl) - AssertEq(nil, err) - ExpectEq(srcObjectContents[:2], string(buf)) } -func (t *ObjectSyncerTest) CallsAppendCreator() { +func (t *SyncerTest) CallsAppendCreator() { var err error // Append some data. 
- _, err = t.content.WriteAt(t.ctx, []byte("burrito"), int64(t.srcObject.Size)) + _, err = t.content.WriteAt([]byte("burrito"), int64(t.srcObject.Size)) AssertEq(nil, err) // Call @@ -344,55 +325,47 @@ func (t *ObjectSyncerTest) CallsAppendCreator() { ExpectEq("burrito", string(t.appendCreator.contents)) } -func (t *ObjectSyncerTest) AppendCreatorFails() { +func (t *SyncerTest) AppendCreatorFails() { var err error t.appendCreator.err = errors.New("taco") // Append some data. - _, err = t.content.WriteAt(t.ctx, []byte("burrito"), int64(t.srcObject.Size)) + _, err = t.content.WriteAt([]byte("burrito"), int64(t.srcObject.Size)) AssertEq(nil, err) // Call - _, _, err = t.call() + _, err = t.call() ExpectThat(err, Error(HasSubstr("Create"))) ExpectThat(err, Error(HasSubstr("taco"))) } -func (t *ObjectSyncerTest) AppendCreatorReturnsPreconditionError() { +func (t *SyncerTest) AppendCreatorReturnsPreconditionError() { var err error t.appendCreator.err = &gcs.PreconditionError{} // Append some data. - _, err = t.content.WriteAt(t.ctx, []byte("burrito"), int64(t.srcObject.Size)) + _, err = t.content.WriteAt([]byte("burrito"), int64(t.srcObject.Size)) AssertEq(nil, err) // Call - _, _, err = t.call() + _, err = t.call() ExpectEq(t.appendCreator.err, err) } -func (t *ObjectSyncerTest) AppendCreatorSucceeds() { +func (t *SyncerTest) AppendCreatorSucceeds() { var err error t.appendCreator.o = &gcs.Object{} t.appendCreator.err = nil // Append some data. - _, err = t.content.WriteAt(t.ctx, []byte("burrito"), int64(t.srcObject.Size)) + _, err = t.content.WriteAt([]byte("burrito"), int64(t.srcObject.Size)) AssertEq(nil, err) // Call - rl, o, err := t.call() + o, err := t.call() AssertEq(nil, err) ExpectEq(t.appendCreator.o, o) - - // Check the read lease. 
- _, err = rl.Seek(0, 0) - AssertEq(nil, err) - - buf, err := ioutil.ReadAll(rl) - AssertEq(nil, err) - ExpectEq(srcObjectContents+"burrito", string(buf)) } diff --git a/internal/gcsx/temp_file.go b/internal/gcsx/temp_file.go new file mode 100644 index 0000000000..f109ca1652 --- /dev/null +++ b/internal/gcsx/temp_file.go @@ -0,0 +1,227 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcsx + +import ( + "fmt" + "io" + "os" + "time" + + "github.com/jacobsa/fuse/fsutil" + "github.com/jacobsa/timeutil" +) + +// A temporary file that keeps track of the lowest offset at which it has been +// modified. +// +// Not safe for concurrent access. +type TempFile interface { + // Panic if any internal invariants are violated. + CheckInvariants() + + // Semantics matching os.File. + io.ReadSeeker + io.ReaderAt + io.WriterAt + Truncate(n int64) (err error) + + // Return information about the current state of the content. May invalidate + // the seek position. + Stat() (sr StatResult, err error) + + // Throw away the resources used by the temporary file. The object must not + // be used again. + Destroy() +} + +type StatResult struct { + // The current size in bytes of the content. + Size int64 + + // It is guaranteed that all bytes in the range [0, DirtyThreshold) are + // unmodified from the original content with which the mutable content object + // was created. 
+ DirtyThreshold int64 + + // The time at which the content was last updated, or nil if we've never + // changed it. + Mtime *time.Time +} + +// Create a temp file whose initial contents are given by the supplied reader. +// dir is a directory on whose file system the inode will live, or the system +// default temporary location if empty. +func NewTempFile( + content io.Reader, + dir string, + clock timeutil.Clock) (tf TempFile, err error) { + // Create an anonymous file to wrap. When we close it, its resources will be + // magically cleaned up. + f, err := fsutil.AnonymousFile(dir) + if err != nil { + err = fmt.Errorf("AnonymousFile: %v", err) + return + } + + // Copy into the file. + size, err := io.Copy(f, content) + if err != nil { + err = fmt.Errorf("copy: %v", err) + return + } + + tf = &tempFile{ + clock: clock, + f: f, + dirtyThreshold: size, + } + + return +} + +type tempFile struct { + ///////////////////////// + // Dependencies + ///////////////////////// + + clock timeutil.Clock + + ///////////////////////// + // Mutable state + ///////////////////////// + + destroyed bool + + // A file containing our current contents. + f *os.File + + // The lowest byte index that has been modified from the initial contents. + // + // INVARIANT: Stat().DirtyThreshold <= Stat().Size + dirtyThreshold int64 + + // The time at which a method that modifies our contents was last called, or + // nil if never. + // + // INVARIANT: mtime == nil => Stat().DirtyThreshold == Stat().Size + mtime *time.Time +} + +//////////////////////////////////////////////////////////////////////// +// Public interface +//////////////////////////////////////////////////////////////////////// + +func (tf *tempFile) CheckInvariants() { + if tf.destroyed { + panic("Use of destroyed tempFile object.") + } + + // Restore the seek position after using Stat below. 
+ pos, err := tf.Seek(0, 1) + if err != nil { + panic(fmt.Sprintf("Seek: %v", err)) + } + + defer func() { + _, err := tf.Seek(pos, 0) + if err != nil { + panic(fmt.Sprintf("Seek: %v", err)) + } + }() + + // INVARIANT: Stat().DirtyThreshold <= Stat().Size + sr, err := tf.Stat() + if err != nil { + panic(fmt.Sprintf("Stat: %v", err)) + } + + if !(sr.DirtyThreshold <= sr.Size) { + panic(fmt.Sprintf("Mismatch: %d vs. %d", sr.DirtyThreshold, sr.Size)) + } + + // INVARIANT: mtime == nil => Stat().DirtyThreshold == Stat().Size + if tf.mtime == nil && sr.DirtyThreshold != sr.Size { + panic(fmt.Sprintf("Mismatch: %d vs. %d", sr.DirtyThreshold, sr.Size)) + } +} + +func (tf *tempFile) Destroy() { + tf.destroyed = true + + // Throw away the file. + tf.f.Close() + tf.f = nil +} + +func (tf *tempFile) Read(p []byte) (int, error) { + return tf.f.Read(p) +} + +func (tf *tempFile) Seek(offset int64, whence int) (int64, error) { + return tf.f.Seek(offset, whence) +} + +func (tf *tempFile) ReadAt(p []byte, offset int64) (int, error) { + return tf.f.ReadAt(p, offset) +} + +func (tf *tempFile) Stat() (sr StatResult, err error) { + sr.DirtyThreshold = tf.dirtyThreshold + sr.Mtime = tf.mtime + + // Get the size from the file. + sr.Size, err = tf.f.Seek(0, 2) + if err != nil { + err = fmt.Errorf("Seek: %v", err) + return + } + + return +} + +func (tf *tempFile) WriteAt(p []byte, offset int64) (int, error) { + // Update our state regarding being dirty. + tf.dirtyThreshold = minInt64(tf.dirtyThreshold, offset) + + newMtime := tf.clock.Now() + tf.mtime = &newMtime + + // Call through. + return tf.f.WriteAt(p, offset) +} + +func (tf *tempFile) Truncate(n int64) error { + // Update our state regarding being dirty. + tf.dirtyThreshold = minInt64(tf.dirtyThreshold, n) + + newMtime := tf.clock.Now() + tf.mtime = &newMtime + + // Call through. 
+ return tf.f.Truncate(n) +} + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +func minInt64(a int64, b int64) int64 { + if a < b { + return a + } + + return b +} diff --git a/internal/gcsx/temp_file_test.go b/internal/gcsx/temp_file_test.go new file mode 100644 index 0000000000..9997aaaf55 --- /dev/null +++ b/internal/gcsx/temp_file_test.go @@ -0,0 +1,216 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gcsx_test + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/googlecloudplatform/gcsfuse/internal/gcsx" + . "github.com/jacobsa/oglematchers" + . 
"github.com/jacobsa/ogletest" + "github.com/jacobsa/timeutil" + "golang.org/x/net/context" +) + +func TestTempFile(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +func readAll(rs io.ReadSeeker) (content []byte, err error) { + _, err = rs.Seek(0, 0) + if err != nil { + err = fmt.Errorf("Seek: %v", err) + return + } + + content, err = ioutil.ReadAll(rs) + if err != nil { + err = fmt.Errorf("ReadFull: %v", err) + return + } + + return +} + +//////////////////////////////////////////////////////////////////////// +// Invariant-checking temp file +//////////////////////////////////////////////////////////////////////// + +// A wrapper around a TempFile that calls CheckInvariants whenever invariants +// should hold. For catching logic errors early in the test. +type checkingTempFile struct { + wrapped gcsx.TempFile +} + +func (tf *checkingTempFile) Stat() (gcsx.StatResult, error) { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.Stat() +} + +func (tf *checkingTempFile) Read(b []byte) (int, error) { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.Read(b) +} + +func (tf *checkingTempFile) Seek(offset int64, whence int) (int64, error) { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.Seek(offset, whence) +} + +func (tf *checkingTempFile) ReadAt(b []byte, o int64) (int, error) { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.ReadAt(b, o) +} + +func (tf *checkingTempFile) WriteAt(b []byte, o int64) (int, error) { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.WriteAt(b, o) +} + +func (tf *checkingTempFile) Truncate(n int64) error { + tf.wrapped.CheckInvariants() + defer tf.wrapped.CheckInvariants() + return tf.wrapped.Truncate(n) +} 
+ +func (tf *checkingTempFile) Destroy() { + tf.wrapped.CheckInvariants() + tf.wrapped.Destroy() +} + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +const initialContent = "tacoburrito" + +const initialContentSize = len(initialContent) + +type TempFileTest struct { + ctx context.Context + clock timeutil.SimulatedClock + + tf checkingTempFile +} + +func init() { RegisterTestSuite(&TempFileTest{}) } + +var _ SetUpInterface = &TempFileTest{} + +func (t *TempFileTest) SetUp(ti *TestInfo) { + var err error + t.ctx = ti.Ctx + + // Set up the clock. + t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local)) + + // And the temp file. + t.tf.wrapped, err = gcsx.NewTempFile( + strings.NewReader(initialContent), + "", + &t.clock) + + AssertEq(nil, err) +} + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *TempFileTest) Stat() { + sr, err := t.tf.Stat() + + AssertEq(nil, err) + ExpectEq(initialContentSize, sr.Size) + ExpectEq(initialContentSize, sr.DirtyThreshold) + ExpectEq(nil, sr.Mtime) +} + +func (t *TempFileTest) ReadAt() { + // Call + var buf [2]byte + n, err := t.tf.ReadAt(buf[:], 1) + + ExpectEq(2, n) + ExpectEq(nil, err) + ExpectEq(initialContent[1:3], string(buf[:])) + + // Check Stat. + sr, err := t.tf.Stat() + + AssertEq(nil, err) + ExpectEq(initialContentSize, sr.Size) + ExpectEq(initialContentSize, sr.DirtyThreshold) + ExpectEq(nil, sr.Mtime) +} + +func (t *TempFileTest) WriteAt() { + // Call + p := []byte("fo") + n, err := t.tf.WriteAt(p, 1) + + ExpectEq(2, n) + ExpectEq(nil, err) + + // Check Stat. + sr, err := t.tf.Stat() + + AssertEq(nil, err) + ExpectEq(initialContentSize, sr.Size) + ExpectEq(1, sr.DirtyThreshold) + ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) + + // Read back. 
+ expected := []byte(initialContent) + expected[1] = 'f' + expected[2] = 'o' + + actual, err := readAll(&t.tf) + AssertEq(nil, err) + ExpectEq(string(expected), string(actual)) +} + +func (t *TempFileTest) Truncate() { + // Call + err := t.tf.Truncate(2) + ExpectEq(nil, err) + + // Check Stat. + sr, err := t.tf.Stat() + + AssertEq(nil, err) + ExpectEq(2, sr.Size) + ExpectEq(2, sr.DirtyThreshold) + ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) + + // Read back. + expected := initialContent[0:2] + + actual, err := readAll(&t.tf) + AssertEq(nil, err) + ExpectEq(expected, string(actual)) +} diff --git a/lease/doc.go b/lease/doc.go deleted file mode 100644 index f38824a4fd..0000000000 --- a/lease/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package lease contains types to help manage disk usage of temporary files. -package lease diff --git a/lease/file_leaser.go b/lease/file_leaser.go deleted file mode 100644 index f4900dff50..0000000000 --- a/lease/file_leaser.go +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "container/list" - "fmt" - "log" - "os" - - "github.com/jacobsa/fuse/fsutil" - "github.com/jacobsa/syncutil" -) - -// A type that manages read and read/write leases for anonymous temporary files. -// -// Safe for concurrent access. -type FileLeaser interface { - // Create a new anonymous file, and return a read/write lease for it. The - // read/write lease will pin resources until rwl.Downgrade is called. It need - // not be called if the process is exiting. - NewFile() (rwl ReadWriteLease, err error) - - // Revoke all read leases that have been issued. For testing use only. - RevokeReadLeases() -} - -// Create a new file leaser that uses the supplied directory for temporary -// files (before unlinking them) and attempts to keep usage in number of files -// and bytes below the given limits. If dir is empty, the system default will be -// used. -// -// Usage may exceed the given limits if there are read/write leases whose total -// size exceeds the limits, since such leases cannot be revoked. 
-func NewFileLeaser( - dir string, - limitNumFiles int, - limitBytes int64) (fl FileLeaser) { - typed := &fileLeaser{ - dir: dir, - limitNumFiles: limitNumFiles, - limitBytes: limitBytes, - readLeasesIndex: make(map[*readLease]*list.Element), - } - - typed.mu = syncutil.NewInvariantMutex(typed.checkInvariants) - - fl = typed - return -} - -type fileLeaser struct { - ///////////////////////// - // Constant data - ///////////////////////// - - dir string - limitNumFiles int - limitBytes int64 - - ///////////////////////// - // Mutable state - ///////////////////////// - - // A lock that guards the mutable state in this struct. Usually this is used - // only for light weight operations, but while evicting it may require - // waiting on a goroutine that is holding a read lease lock while reading - // from a file. - // - // Lock ordering - // ------------- - // - // Define < to be the minimum strict partial order satisfying: - // - // 1. For any read/write lease W, W < leaser. - // 2. For any read lease R, leaser < R. - // - // In other words: read/write before leaser before read, and never hold two - // locks from the same category together. - mu syncutil.InvariantMutex - - // The number of outstanding read/write leases. - // - // INVARIANT: readWriteCount >= 0 - readWriteCount int - - // The current estimated total size of outstanding read/write leases. This is - // only an estimate because we can't synchronize its update with a call to - // the wrapped file to e.g. write or truncate. - readWriteBytes int64 - - // All outstanding read leases, ordered by recency of use. - // - // INVARIANT: Each element is of type *readLease - // INVARIANT: No element has been revoked. - // INVARIANT: 0 <= readLeases.Len() <= max(0, limitNumFiles - readWriteCount) - readLeases list.List - - // The sum of all outstanding read lease sizes. - // - // INVARIANT: Equal to the sum over readLeases sizes. 
- // INVARIANT: 0 <= readOutstanding - // INVARIANT: readOutstanding <= max(0, limitBytes - readWriteBytes) - readOutstanding int64 - - // Index of read leases by pointer. - // - // INVARIANT: Is an index of exactly the elements of readLeases - readLeasesIndex map[*readLease]*list.Element -} - -// LOCKS_EXCLUDED(fl.mu) -func (fl *fileLeaser) NewFile() (rwl ReadWriteLease, err error) { - // Create an anonymous file. - f, err := fsutil.AnonymousFile(fl.dir) - if err != nil { - err = fmt.Errorf("AnonymousFile: %v", err) - return - } - - // Wrap a lease around it. - rwl = newReadWriteLease(fl, 0, f) - - // Update state. - fl.mu.Lock() - fl.readWriteCount++ - fl.evict(fl.limitNumFiles, fl.limitBytes) - fl.mu.Unlock() - - return -} - -// LOCKS_EXCLUDED(fl.mu) -func (fl *fileLeaser) RevokeReadLeases() { - fl.mu.Lock() - defer fl.mu.Unlock() - - fl.evict(0, 0) -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func maxInt(a int, b int) int { - if a > b { - return a - } - - return b -} - -func maxInt64(a int64, b int64) int64 { - if a > b { - return a - } - - return b -} - -// LOCKS_REQUIRED(fl.mu) -func (fl *fileLeaser) checkInvariants() { - // INVARIANT: readWriteCount >= 0 - if fl.readWriteCount < 0 { - panic(fmt.Sprintf("Unexpected read/write count: %d", fl.readWriteCount)) - } - - // INVARIANT: Each element is of type *readLease - // INVARIANT: No element has been revoked. 
- for e := fl.readLeases.Front(); e != nil; e = e.Next() { - rl := e.Value.(*readLease) - func() { - rl.Mu.Lock() - defer rl.Mu.Unlock() - - if rl.revoked() { - panic("Found revoked read lease") - } - }() - } - - // INVARIANT: 0 <= readLeases.Len() <= max(0, limitNumFiles - readWriteCount) - if !(0 <= fl.readLeases.Len() && - fl.readLeases.Len() <= maxInt(0, fl.limitNumFiles-fl.readWriteCount)) { - panic(fmt.Sprintf( - "Out of range read lease count: %d, limitNumFiles: %d, readWriteCount: %d", - fl.readLeases.Len(), - fl.limitNumFiles, - fl.readWriteCount)) - } - - // INVARIANT: Equal to the sum over readLeases sizes. - var sum int64 - for e := fl.readLeases.Front(); e != nil; e = e.Next() { - rl := e.Value.(*readLease) - sum += rl.Size() - } - - if fl.readOutstanding != sum { - panic(fmt.Sprintf( - "readOutstanding mismatch: %v vs. %v", - fl.readOutstanding, - sum)) - } - - // INVARIANT: 0 <= readOutstanding - if !(0 <= fl.readOutstanding) { - panic(fmt.Sprintf("Unexpected readOutstanding: %v", fl.readOutstanding)) - } - - // INVARIANT: readOutstanding <= max(0, limitBytes - readWriteBytes) - if !(fl.readOutstanding <= maxInt64(0, fl.limitBytes-fl.readWriteBytes)) { - panic(fmt.Sprintf( - "Unexpected readOutstanding: %v. limitBytes: %v, readWriteBytes: %v", - fl.readOutstanding, - fl.limitBytes, - fl.readWriteBytes)) - } - - // INVARIANT: Is an index of exactly the elements of readLeases - if len(fl.readLeasesIndex) != fl.readLeases.Len() { - panic(fmt.Sprintf( - "readLeasesIndex length mismatch: %v vs. %v", - len(fl.readLeasesIndex), - fl.readLeases.Len())) - } - - for e := fl.readLeases.Front(); e != nil; e = e.Next() { - if fl.readLeasesIndex[e.Value.(*readLease)] != e { - panic("Mismatch in readLeasesIndex") - } - } -} - -// Add the supplied delta to the leaser's view of outstanding read/write lease -// bytes, then revoke read leases until we're under limitBytes or we run out of -// leases to revoke. -// -// Called by readWriteLease while holding its lock. 
-// -// LOCKS_EXCLUDED(fl.mu) -func (fl *fileLeaser) addReadWriteByteDelta(delta int64) { - fl.mu.Lock() - defer fl.mu.Unlock() - - fl.readWriteBytes += delta - fl.evict(fl.limitNumFiles, fl.limitBytes) -} - -// LOCKS_REQUIRED(fl.mu) -func (fl *fileLeaser) overLimit(limitNumFiles int, limitBytes int64) bool { - return fl.readLeases.Len()+fl.readWriteCount > limitNumFiles || - fl.readOutstanding+fl.readWriteBytes > limitBytes -} - -// Revoke read leases until we're within the given limitBytes or we run out of -// things to revoke. -// -// LOCKS_REQUIRED(fl.mu) -func (fl *fileLeaser) evict(limitNumFiles int, limitBytes int64) { - for fl.overLimit(limitNumFiles, limitBytes) { - // Do we have anything to revoke? - lru := fl.readLeases.Back() - if lru == nil { - return - } - - // Revoke it. - rl := lru.Value.(*readLease) - func() { - rl.Mu.Lock() - defer rl.Mu.Unlock() - - fl.revoke(rl) - }() - } -} - -// Note that a read/write lease of the given size is destroying itself, and -// turn it into a read lease of the supplied size wrapped around the given -// file. -// -// Called by readWriteLease with its lock held. -// -// LOCKS_EXCLUDED(fl.mu) -func (fl *fileLeaser) downgrade( - size int64, - file *os.File) (rl ReadLease) { - // Create the read lease. - rlTyped := newReadLease(size, fl, file) - rl = rlTyped - - // Update the leaser's state, noting the new read lease and that the - // read/write lease has gone away. - fl.mu.Lock() - defer fl.mu.Unlock() - - fl.readWriteCount-- - fl.readWriteBytes -= size - fl.readOutstanding += size - - e := fl.readLeases.PushFront(rl) - fl.readLeasesIndex[rlTyped] = e - - // Ensure that we're not now over capacity. - fl.evict(fl.limitNumFiles, fl.limitBytes) - - return -} - -// Upgrade the supplied read lease. -// -// Called by readLease with no lock held. -// -// LOCKS_EXCLUDED(fl.mu, rl.Mu) -func (fl *fileLeaser) upgrade(rl *readLease) (rwl ReadWriteLease, err error) { - // Grab each lock in turn. 
- fl.mu.Lock() - defer fl.mu.Unlock() - - rl.Mu.Lock() - defer rl.Mu.Unlock() - - // Has the lease already been revoked? - if rl.revoked() { - err = &RevokedError{} - return - } - - size := rl.Size() - - // Update leaser state. - fl.readWriteCount++ - fl.readWriteBytes += size - fl.readOutstanding -= size - - e := fl.readLeasesIndex[rl] - delete(fl.readLeasesIndex, rl) - fl.readLeases.Remove(e) - - // Extract the interesting information from the read lease, leaving it an - // empty husk. - file := rl.release() - - // Create the read/write lease, telling it that we already know its initial - // size. - rwl = newReadWriteLease(fl, size, file) - - return -} - -// Promote the given read lease to most recently used, if we still know it. -// Because our lock order forbids us from acquiring the leaser lock while -// holding a read lease lock, this of course races with other promotions. -// -// Called by readLease without holding a lock. -// -// LOCKS_EXCLUDED(fl.mu) -func (fl *fileLeaser) promoteToMostRecent(rl *readLease) { - fl.mu.Lock() - defer fl.mu.Unlock() - - e := fl.readLeasesIndex[rl] - if e != nil { - fl.readLeases.MoveToFront(e) - } -} - -// Forcibly revoke the supplied read lease. -// -// REQUIRES: !rl.revoked() -// -// LOCKS_REQUIRED(fl.mu) -// LOCKS_REQUIRED(rl.Mu) -func (fl *fileLeaser) revoke(rl *readLease) { - if rl.revoked() { - panic("Already revoked") - } - - size := rl.Size() - - // Update leaser state. - fl.readOutstanding -= size - - e := fl.readLeasesIndex[rl] - delete(fl.readLeasesIndex, rl) - fl.readLeases.Remove(e) - - // Kill the lease and close its file. - file := rl.release() - if err := file.Close(); err != nil { - log.Println("Error closing file for revoked lease:", err) - } -} - -// Called by the read lease when the user wants to manually revoke it. -// -// LOCKS_EXCLUDED(fl.mu) -// LOCKS_EXCLUDED(rl.Mu) -func (fl *fileLeaser) revokeVoluntarily(rl *readLease) { - // Grab each lock in turn. 
- fl.mu.Lock() - defer fl.mu.Unlock() - - rl.Mu.Lock() - defer rl.Mu.Unlock() - - // Has the lease already been revoked? - if rl.revoked() { - return - } - - // Revoke it. - fl.revoke(rl) -} diff --git a/lease/file_leaser_test.go b/lease/file_leaser_test.go deleted file mode 100644 index efa312a5f7..0000000000 --- a/lease/file_leaser_test.go +++ /dev/null @@ -1,591 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease_test - -import ( - "bytes" - "fmt" - "io" - "testing" - - "github.com/googlecloudplatform/gcsfuse/lease" - . "github.com/jacobsa/oglematchers" - . "github.com/jacobsa/ogletest" -) - -func TestFileLeaser(t *testing.T) { RunTests(t) } - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func panicIf(err *error) { - if *err != nil { - panic(*err) - } -} - -// Create a read/write lease and fill it in with data of the specified length. -// Panic on failure. -func newFileOfLength( - fl lease.FileLeaser, - length int) (rwl lease.ReadWriteLease) { - var err error - defer panicIf(&err) - - // Create the lease. - rwl, err = fl.NewFile() - if err != nil { - err = fmt.Errorf("NewFile: %v", err) - return - } - - defer func() { - if err != nil { - rwl.Downgrade().Revoke() - rwl = nil - } - }() - - // Write the contents. 
- _, err = rwl.Write(bytes.Repeat([]byte("a"), length)) - if err != nil { - err = fmt.Errorf("Write: %v", err) - return - } - - return -} - -// Upgrade the supplied lease or panic. -func upgrade(rl lease.ReadLease) (rwl lease.ReadWriteLease) { - var err error - defer panicIf(&err) - - // Attempt to upgrade. - rwl, err = rl.Upgrade() - - return -} - -func growBy(w io.WriteSeeker, n int) { - var err error - defer panicIf(&err) - - // Seek to the end. - _, err = w.Seek(0, 2) - if err != nil { - err = fmt.Errorf("Seek: %v", err) - return - } - - // Write. - _, err = w.Write(bytes.Repeat([]byte("a"), n)) - if err != nil { - err = fmt.Errorf("Write: %v", err) - return - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Boilerplate -//////////////////////////////////////////////////////////////////////// - -const limitNumFiles = 5 -const limitBytes = 17 - -type FileLeaserTest struct { - fl lease.FileLeaser -} - -var _ SetUpInterface = &FileLeaserTest{} - -func init() { RegisterTestSuite(&FileLeaserTest{}) } - -func (t *FileLeaserTest) SetUp(ti *TestInfo) { - t.fl = lease.NewFileLeaser("", limitNumFiles, limitBytes) -} - -//////////////////////////////////////////////////////////////////////// -// Tests -//////////////////////////////////////////////////////////////////////// - -func (t *FileLeaserTest) ReadWriteLeaseInitialState() { - var n int - var off int64 - var err error - buf := make([]byte, 1024) - - // Create - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - // Size - size, err := rwl.Size() - AssertEq(nil, err) - ExpectEq(0, size) - - // Seek - off, err = rwl.Seek(0, 2) - AssertEq(nil, err) - ExpectEq(0, off) - - // Read - n, err = rwl.Read(buf) - ExpectEq(io.EOF, err) - ExpectEq(0, n) - - // ReadAt - n, err = rwl.ReadAt(buf, 0) - ExpectEq(io.EOF, err) - ExpectEq(0, n) -} - -func (t *FileLeaserTest) ModifyThenObserveReadWriteLease() { - var n int - var off int64 - var 
size int64 - var err error - buf := make([]byte, 1024) - - // Create - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - // Write, then check size and offset. - n, err = rwl.Write([]byte("tacoburrito")) - AssertEq(nil, err) - ExpectEq(len("tacoburrito"), n) - - size, err = rwl.Size() - AssertEq(nil, err) - ExpectEq(len("tacoburrito"), size) - - off, err = rwl.Seek(0, 1) - AssertEq(nil, err) - ExpectEq(len("tacoburrito"), off) - - // Pwrite, then check size. - n, err = rwl.WriteAt([]byte("enchilada"), 4) - AssertEq(nil, err) - ExpectEq(len("enchilada"), n) - - size, err = rwl.Size() - AssertEq(nil, err) - ExpectEq(len("tacoenchilada"), size) - - // Truncate downward, then check size. - err = rwl.Truncate(4) - AssertEq(nil, err) - - size, err = rwl.Size() - AssertEq(nil, err) - ExpectEq(len("taco"), size) - - // Seek, then read everything. - off, err = rwl.Seek(0, 0) - AssertEq(nil, err) - ExpectEq(0, off) - - n, err = rwl.Read(buf) - ExpectThat(err, AnyOf(nil, io.EOF)) - ExpectEq("taco", string(buf[0:n])) -} - -func (t *FileLeaserTest) DowngradeThenObserve() { - var n int - var off int64 - var size int64 - var err error - buf := make([]byte, 1024) - - // Create and write some data. - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - - n, err = rwl.Write([]byte("taco")) - AssertEq(nil, err) - - // Downgrade. - rl := rwl.Downgrade() - rwl = nil - - // Observing via the read lease should work fine. - size = rl.Size() - ExpectEq(len("taco"), size) - - off, err = rl.Seek(-4, 2) - AssertEq(nil, err) - ExpectEq(0, off) - - n, err = rl.Read(buf) - ExpectThat(err, AnyOf(nil, io.EOF)) - ExpectEq("taco", string(buf[0:n])) - - n, err = rl.ReadAt(buf[0:2], 1) - AssertEq(nil, err) - ExpectEq("ac", string(buf[0:2])) -} - -func (t *FileLeaserTest) DowngradeThenUpgradeThenObserve() { - var n int - var off int64 - var size int64 - var err error - buf := make([]byte, 1024) - - // Create and write some data. 
- rwl, err := t.fl.NewFile() - AssertEq(nil, err) - - n, err = rwl.Write([]byte("taco")) - AssertEq(nil, err) - - // Downgrade. - rl := rwl.Downgrade() - rwl = nil - - // Upgrade again. - rwl, err = rl.Upgrade() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - // Interacting with the read lease should no longer work. - _, err = rl.Read(buf) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl.Seek(0, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl.ReadAt(buf, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - tmp, err := rl.Upgrade() - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - ExpectEq(nil, tmp) - - // Calling Revoke should cause nothing nasty to happen. - rl.Revoke() - - // Observing via the new read/write lease should work fine. - size, err = rwl.Size() - AssertEq(nil, err) - ExpectEq(len("taco"), size) - - off, err = rwl.Seek(-4, 2) - AssertEq(nil, err) - ExpectEq(0, off) - - n, err = rwl.Read(buf) - ExpectThat(err, AnyOf(nil, io.EOF)) - ExpectEq("taco", string(buf[0:n])) - - n, err = rwl.ReadAt(buf[0:2], 1) - AssertEq(nil, err) - ExpectEq("ac", string(buf[0:2])) -} - -func (t *FileLeaserTest) DowngradeFileWhoseSizeIsAboveLimit() { - var err error - buf := make([]byte, 1024) - - // Create and write data larger than the capacity. - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - - _, err = rwl.Write(bytes.Repeat([]byte("a"), limitBytes+1)) - AssertEq(nil, err) - - // Downgrade. - rl := rwl.Downgrade() - rwl = nil - - // The read lease should be revoked on arrival. 
- _, err = rl.Read(buf) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl.Seek(0, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl.ReadAt(buf, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - tmp, err := rl.Upgrade() - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - ExpectEq(nil, tmp) -} - -func (t *FileLeaserTest) NewFileCausesEviction() { - // Set up limitNumFiles read leases. - var rls []lease.ReadLease - for i := 0; i < limitNumFiles; i++ { - rls = append(rls, newFileOfLength(t.fl, 0).Downgrade()) - } - - // All should still be good. - for _, rl := range rls { - AssertFalse(rl.Revoked()) - } - - // Creating two more write leases should cause two to be revoked. - rwl0, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl0.Downgrade().Revoke() }() - - rwl1, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl1.Downgrade().Revoke() }() - - revoked := 0 - for _, rl := range rls { - if rl.Revoked() { - revoked++ - } - } - - ExpectEq(2, revoked) -} - -func (t *FileLeaserTest) WriteCausesEviction() { - var err error - - // Set up a read lease whose size is right at the limit. - rl := newFileOfLength(t.fl, limitBytes).Downgrade() - AssertFalse(rl.Revoked()) - - // Set up a new read/write lease. The read lease should still be unrevoked. - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - AssertFalse(rl.Revoked()) - - // Writing zero bytes shouldn't cause trouble. - _, err = rwl.Write([]byte("")) - AssertEq(nil, err) - - AssertFalse(rl.Revoked()) - - // But the next byte should. - _, err = rwl.Write([]byte("a")) - AssertEq(nil, err) - - ExpectTrue(rl.Revoked()) -} - -func (t *FileLeaserTest) WriteAtCausesEviction() { - var err error - AssertLt(3, limitBytes) - - // Set up a read lease whose size is three bytes below the limit. 
- rl := newFileOfLength(t.fl, limitBytes-3).Downgrade() - AssertFalse(rl.Revoked()) - - // Set up a new read/write lease. The read lease should still be unrevoked. - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - AssertFalse(rl.Revoked()) - - // Write in three bytes. Everything should be fine. - _, err = rwl.Write([]byte("foo")) - AssertEq(nil, err) - - // Overwriting a byte shouldn't cause trouble. - _, err = rwl.WriteAt([]byte("p"), 0) - AssertEq(nil, err) - - AssertFalse(rl.Revoked()) - - // But extending the file by one byte should. - _, err = rwl.WriteAt([]byte("taco"), 0) - AssertEq(nil, err) - - ExpectTrue(rl.Revoked()) -} - -func (t *FileLeaserTest) TruncateCausesEviction() { - var err error - AssertLt(3, limitBytes) - - // Set up a read lease whose size is three bytes below the limit. - rl := newFileOfLength(t.fl, limitBytes-3).Downgrade() - AssertFalse(rl.Revoked()) - - // Set up a new read/write lease. The read lease should still be unrevoked. - rwl, err := t.fl.NewFile() - AssertEq(nil, err) - defer func() { rwl.Downgrade().Revoke() }() - - AssertFalse(rl.Revoked()) - - // Truncate up to the limit. Nothing should happen. - err = rwl.Truncate(3) - AssertEq(nil, err) - - AssertFalse(rl.Revoked()) - - // Truncate downward. Again, nothing should happen. - err = rwl.Truncate(2) - AssertEq(nil, err) - - AssertFalse(rl.Revoked()) - - // But extending to four bytes should cause revocation. - err = rwl.Truncate(4) - AssertEq(nil, err) - - ExpectTrue(rl.Revoked()) -} - -func (t *FileLeaserTest) EvictionIsLRU() { - AssertLt(4, limitBytes) - - // Arrange for four read leases, with a known order of recency of usage. Make - // each the most recent in turn using different methods that we expect to - // promote to most recent. 
- rl0 := newFileOfLength(t.fl, 1).Downgrade() - rl2 := newFileOfLength(t.fl, 1).Downgrade() - rl3 := newFileOfLength(t.fl, 1).Downgrade() - - rl0.Read([]byte{}) // Least recent - rl1 := newFileOfLength(t.fl, 1).Downgrade() // Second least recent - rl2.Read([]byte{}) // Third least recent - rl3.ReadAt([]byte{}, 0) // Fourth least recent - - // Fill up the remaining space. All read leases should still be valid. - rwl := newFileOfLength(t.fl, limitBytes-4) - - AssertFalse(rl0.Revoked()) - AssertFalse(rl1.Revoked()) - AssertFalse(rl2.Revoked()) - AssertFalse(rl3.Revoked()) - - // Use up one more byte. The least recently used lease should be revoked. - growBy(rwl, 1) - - AssertTrue(rl0.Revoked()) - AssertFalse(rl1.Revoked()) - AssertFalse(rl2.Revoked()) - AssertFalse(rl3.Revoked()) - - // Two more bytes. Now the next two should go. - growBy(rwl, 2) - - AssertTrue(rl0.Revoked()) - AssertTrue(rl1.Revoked()) - AssertTrue(rl2.Revoked()) - AssertFalse(rl3.Revoked()) - - // Downgrading and upgrading the read/write lease should change nothing. - rwl = upgrade(rwl.Downgrade()) - AssertNe(nil, rwl) - defer func() { rwl.Downgrade().Revoke() }() - - AssertTrue(rl0.Revoked()) - AssertTrue(rl1.Revoked()) - AssertTrue(rl2.Revoked()) - AssertFalse(rl3.Revoked()) - - // But writing one more byte should boot the last one. - growBy(rwl, 1) - - AssertTrue(rl0.Revoked()) - AssertTrue(rl1.Revoked()) - AssertTrue(rl2.Revoked()) - AssertTrue(rl3.Revoked()) -} - -func (t *FileLeaserTest) RevokeVoluntarily() { - var err error - buf := make([]byte, 1024) - - AssertLt(3, limitBytes) - - // Set up two read leases, together occupying all space, and an empty - // read/write lease. - rl0 := newFileOfLength(t.fl, 3).Downgrade() - rl1 := newFileOfLength(t.fl, limitBytes-3).Downgrade() - rwl := newFileOfLength(t.fl, 0) - defer func() { rwl.Downgrade().Revoke() }() - - AssertFalse(rl0.Revoked()) - AssertFalse(rl1.Revoked()) - - // Voluntarily revoke the first. Nothing should work anymore. 
- rl0.Revoke() - AssertTrue(rl0.Revoked()) - - _, err = rl0.Read(buf) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl0.Seek(0, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl0.ReadAt(buf, 0) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - // Calling Revoke more times should be harmless. - rl0.Revoke() - rl0.Revoke() - rl0.Revoke() - - // The other lease should be fine. - AssertFalse(rl1.Revoked()) - - // The revocation should have freed up credit that can be used by the - // read/write lease without booting the other read lease. - growBy(rwl, 3) - ExpectFalse(rl1.Revoked()) - - // But one more byte should evict it, as usual. - growBy(rwl, 1) - ExpectTrue(rl1.Revoked()) -} - -func (t *FileLeaserTest) RevokeAllReadLeases() { - var err error - buf := make([]byte, 1024) - - AssertLt(3, limitBytes) - - // Set up two read leases, together occupying all space. - rl0 := newFileOfLength(t.fl, 3).Downgrade() - rl1 := newFileOfLength(t.fl, limitBytes-3).Downgrade() - - AssertFalse(rl0.Revoked()) - AssertFalse(rl1.Revoked()) - - // Revoke all read leases. None of them should work anymore. - t.fl.RevokeReadLeases() - - AssertTrue(rl0.Revoked()) - AssertTrue(rl1.Revoked()) - - _, err = rl0.Read(buf) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - _, err = rl1.Read(buf) - ExpectThat(err, HasSameTypeAs(&lease.RevokedError{})) - - // Calling Revoke more times should be harmless. - rl0.Revoke() - rl1.Revoke() -} diff --git a/lease/mock_lease/mock_file_leaser.go b/lease/mock_lease/mock_file_leaser.go deleted file mode 100644 index f5d7a7958e..0000000000 --- a/lease/mock_lease/mock_file_leaser.go +++ /dev/null @@ -1,90 +0,0 @@ -// This file was auto-generated using createmock. 
See the following page for -// more information: -// -// https://github.com/jacobsa/oglemock -// - -package mock_lease - -import ( - fmt "fmt" - lease "github.com/googlecloudplatform/gcsfuse/lease" - oglemock "github.com/jacobsa/oglemock" - runtime "runtime" - unsafe "unsafe" -) - -type MockFileLeaser interface { - lease.FileLeaser - oglemock.MockObject -} - -type mockFileLeaser struct { - controller oglemock.Controller - description string -} - -func NewMockFileLeaser( - c oglemock.Controller, - desc string) MockFileLeaser { - return &mockFileLeaser{ - controller: c, - description: desc, - } -} - -func (m *mockFileLeaser) Oglemock_Id() uintptr { - return uintptr(unsafe.Pointer(m)) -} - -func (m *mockFileLeaser) Oglemock_Description() string { - return m.description -} - -func (m *mockFileLeaser) NewFile() (o0 lease.ReadWriteLease, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "NewFile", - file, - line, - []interface{}{}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockFileLeaser.NewFile: invalid return values: %v", retVals)) - } - - // o0 lease.ReadWriteLease - if retVals[0] != nil { - o0 = retVals[0].(lease.ReadWriteLease) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockFileLeaser) RevokeReadLeases() { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "RevokeReadLeases", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockFileLeaser.RevokeReadLeases: invalid return values: %v", retVals)) - } - - return -} diff --git a/lease/mock_lease/mock_read_lease.go b/lease/mock_lease/mock_read_lease.go deleted file mode 100644 index 5d3645f861..0000000000 --- a/lease/mock_lease/mock_read_lease.go +++ /dev/null @@ -1,225 +0,0 @@ -// This file was auto-generated using createmock. See the following page for -// more information: -// -// https://github.com/jacobsa/oglemock -// - -package mock_lease - -import ( - fmt "fmt" - lease "github.com/googlecloudplatform/gcsfuse/lease" - oglemock "github.com/jacobsa/oglemock" - runtime "runtime" - unsafe "unsafe" -) - -type MockReadLease interface { - lease.ReadLease - oglemock.MockObject -} - -type mockReadLease struct { - controller oglemock.Controller - description string -} - -func NewMockReadLease( - c oglemock.Controller, - desc string) MockReadLease { - return &mockReadLease{ - controller: c, - description: desc, - } -} - -func (m *mockReadLease) Oglemock_Id() uintptr { - return uintptr(unsafe.Pointer(m)) -} - -func (m *mockReadLease) Oglemock_Description() string { - return m.description -} - -func (m *mockReadLease) Read(p0 []uint8) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Read", - file, - line, - []interface{}{p0}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadLease.Read: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadLease) ReadAt(p0 []uint8, p1 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. 
- _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "ReadAt", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadLease.ReadAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadLease) Revoke() { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Revoke", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockReadLease.Revoke: invalid return values: %v", retVals)) - } - - return -} - -func (m *mockReadLease) Revoked() (o0 bool) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Revoked", - file, - line, - []interface{}{}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockReadLease.Revoked: invalid return values: %v", retVals)) - } - - // o0 bool - if retVals[0] != nil { - o0 = retVals[0].(bool) - } - - return -} - -func (m *mockReadLease) Seek(p0 int64, p1 int) (o0 int64, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "Seek", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadLease.Seek: invalid return values: %v", retVals)) - } - - // o0 int64 - if retVals[0] != nil { - o0 = retVals[0].(int64) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadLease) Size() (o0 int64) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Size", - file, - line, - []interface{}{}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockReadLease.Size: invalid return values: %v", retVals)) - } - - // o0 int64 - if retVals[0] != nil { - o0 = retVals[0].(int64) - } - - return -} - -func (m *mockReadLease) Upgrade() (o0 lease.ReadWriteLease, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Upgrade", - file, - line, - []interface{}{}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadLease.Upgrade: invalid return values: %v", retVals)) - } - - // o0 lease.ReadWriteLease - if retVals[0] != nil { - o0 = retVals[0].(lease.ReadWriteLease) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} diff --git a/lease/mock_lease/mock_read_proxy.go b/lease/mock_lease/mock_read_proxy.go deleted file mode 100644 index 85d86892dc..0000000000 --- a/lease/mock_lease/mock_read_proxy.go +++ /dev/null @@ -1,163 +0,0 @@ -// This file was auto-generated using createmock. 
See the following page for -// more information: -// -// https://github.com/jacobsa/oglemock -// - -package mock_lease - -import ( - fmt "fmt" - lease "github.com/googlecloudplatform/gcsfuse/lease" - oglemock "github.com/jacobsa/oglemock" - context "golang.org/x/net/context" - runtime "runtime" - unsafe "unsafe" -) - -type MockReadProxy interface { - lease.ReadProxy - oglemock.MockObject -} - -type mockReadProxy struct { - controller oglemock.Controller - description string -} - -func NewMockReadProxy( - c oglemock.Controller, - desc string) MockReadProxy { - return &mockReadProxy{ - controller: c, - description: desc, - } -} - -func (m *mockReadProxy) Oglemock_Id() uintptr { - return uintptr(unsafe.Pointer(m)) -} - -func (m *mockReadProxy) Oglemock_Description() string { - return m.description -} - -func (m *mockReadProxy) CheckInvariants() { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "CheckInvariants", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockReadProxy.CheckInvariants: invalid return values: %v", retVals)) - } - - return -} - -func (m *mockReadProxy) Destroy() { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Destroy", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockReadProxy.Destroy: invalid return values: %v", retVals)) - } - - return -} - -func (m *mockReadProxy) ReadAt(p0 context.Context, p1 []uint8, p2 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "ReadAt", - file, - line, - []interface{}{p0, p1, p2}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadProxy.ReadAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadProxy) Size() (o0 int64) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Size", - file, - line, - []interface{}{}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockReadProxy.Size: invalid return values: %v", retVals)) - } - - // o0 int64 - if retVals[0] != nil { - o0 = retVals[0].(int64) - } - - return -} - -func (m *mockReadProxy) Upgrade(p0 context.Context) (o0 lease.ReadWriteLease, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Upgrade", - file, - line, - []interface{}{p0}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadProxy.Upgrade: invalid return values: %v", retVals)) - } - - // o0 lease.ReadWriteLease - if retVals[0] != nil { - o0 = retVals[0].(lease.ReadWriteLease) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} diff --git a/lease/mock_lease/mock_read_write_lease.go b/lease/mock_lease/mock_read_write_lease.go deleted file mode 100644 index 1effff4ed9..0000000000 --- a/lease/mock_lease/mock_read_write_lease.go +++ /dev/null @@ -1,264 +0,0 @@ -// This file was auto-generated using createmock. 
See the following page for -// more information: -// -// https://github.com/jacobsa/oglemock -// - -package mock_lease - -import ( - fmt "fmt" - lease "github.com/googlecloudplatform/gcsfuse/lease" - oglemock "github.com/jacobsa/oglemock" - runtime "runtime" - unsafe "unsafe" -) - -type MockReadWriteLease interface { - lease.ReadWriteLease - oglemock.MockObject -} - -type mockReadWriteLease struct { - controller oglemock.Controller - description string -} - -func NewMockReadWriteLease( - c oglemock.Controller, - desc string) MockReadWriteLease { - return &mockReadWriteLease{ - controller: c, - description: desc, - } -} - -func (m *mockReadWriteLease) Oglemock_Id() uintptr { - return uintptr(unsafe.Pointer(m)) -} - -func (m *mockReadWriteLease) Oglemock_Description() string { - return m.description -} - -func (m *mockReadWriteLease) Downgrade() (o0 lease.ReadLease) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Downgrade", - file, - line, - []interface{}{}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockReadWriteLease.Downgrade: invalid return values: %v", retVals)) - } - - // o0 lease.ReadLease - if retVals[0] != nil { - o0 = retVals[0].(lease.ReadLease) - } - - return -} - -func (m *mockReadWriteLease) Read(p0 []uint8) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "Read", - file, - line, - []interface{}{p0}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.Read: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadWriteLease) ReadAt(p0 []uint8, p1 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "ReadAt", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.ReadAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadWriteLease) Seek(p0 int64, p1 int) (o0 int64, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Seek", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.Seek: invalid return values: %v", retVals)) - } - - // o0 int64 - if retVals[0] != nil { - o0 = retVals[0].(int64) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadWriteLease) Size() (o0 int64, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "Size", - file, - line, - []interface{}{}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.Size: invalid return values: %v", retVals)) - } - - // o0 int64 - if retVals[0] != nil { - o0 = retVals[0].(int64) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadWriteLease) Truncate(p0 int64) (o0 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Truncate", - file, - line, - []interface{}{p0}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockReadWriteLease.Truncate: invalid return values: %v", retVals)) - } - - // o0 error - if retVals[0] != nil { - o0 = retVals[0].(error) - } - - return -} - -func (m *mockReadWriteLease) Write(p0 []uint8) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Write", - file, - line, - []interface{}{p0}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.Write: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockReadWriteLease) WriteAt(p0 []uint8, p1 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "WriteAt", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockReadWriteLease.WriteAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} diff --git a/lease/multi_read_proxy.go b/lease/multi_read_proxy.go deleted file mode 100644 index 2191e87767..0000000000 --- a/lease/multi_read_proxy.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "fmt" - "io" - "sort" - - "golang.org/x/net/context" -) - -// Create a read proxy consisting of the contents defined by the supplied -// refreshers concatenated. See NewReadProxy for more. -// -// If rl is non-nil, it will be used as the first temporary copy of the -// contents, and must match the concatenation of the content returned by the -// refreshers. -func NewMultiReadProxy( - fl FileLeaser, - refreshers []Refresher, - rl ReadLease) (rp ReadProxy) { - // Create one wrapped read proxy per refresher. 
- var wrappedProxies []readProxyAndOffset - var size int64 - - for _, r := range refreshers { - wrapped := NewReadProxy(fl, r, nil) - wrappedProxies = append(wrappedProxies, readProxyAndOffset{size, wrapped}) - size += wrapped.Size() - } - - // Check that the lease the user gave us, if any, is consistent. - if rl != nil && rl.Size() != size { - panic(fmt.Sprintf( - "Provided read lease of size %d bytes doesn't match combined size "+ - "%d bytes for %d refreshers", - rl.Size(), - size, - len(refreshers))) - } - - // Create the multi-read proxy. - rp = &multiReadProxy{ - size: size, - leaser: fl, - rps: wrappedProxies, - lease: rl, - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Implementation -//////////////////////////////////////////////////////////////////////// - -type multiReadProxy struct { - ///////////////////////// - // Constant data - ///////////////////////// - - // The size of the proxied content. - size int64 - - ///////////////////////// - // Dependencies - ///////////////////////// - - leaser FileLeaser - - // The wrapped read proxies, indexed by their logical starting offset. - // - // INVARIANT: If len(rps) != 0, rps[0].off == 0 - // INVARIANT: For each x, x.rp.Size() >= 0 - // INVARIANT: For each i>0, rps[i].off == rps[i-i].off + rps[i-i].rp.Size() - // INVARIANT: size is the sum over the wrapped proxy sizes. - rps []readProxyAndOffset - - ///////////////////////// - // Mutable state - ///////////////////////// - - // A read lease for the entire contents. May be nil. - // - // INVARIANT: If lease != nil, size == lease.Size() - lease ReadLease - - destroyed bool -} - -func (mrp *multiReadProxy) Size() (size int64) { - size = mrp.size - return -} - -func (mrp *multiReadProxy) ReadAt( - ctx context.Context, - p []byte, - off int64) (n int, err error) { - // Special case: can we read directly from our initial read lease? - if mrp.lease != nil { - n, err = mrp.lease.ReadAt(p, off) - - // Successful? 
- if err == nil { - return - } - - // Revoked? - if _, ok := err.(*RevokedError); ok { - mrp.lease = nil - err = nil - } else { - // Propagate other errors - return - } - } - - // Special case: we don't support negative offsets, silly user. - if off < 0 { - err = fmt.Errorf("Invalid offset: %v", off) - return - } - - // Special case: offsets at or beyond the end of our content can never yield - // any content, and the io.ReaderAt spec allows us to return EOF. Knock them - // out here so we know off is in range when we start below. - if off >= mrp.Size() { - err = io.EOF - return - } - - // The read proxy that contains off is the *last* read proxy whose start - // offset is less than or equal to off. Find the first that is greater and - // move back one. - // - // Because we handled the special cases above, this must be in range. - wrappedIndex := mrp.upperBound(off) - 1 - - if wrappedIndex < 0 || wrappedIndex >= len(mrp.rps) { - panic(fmt.Sprintf("Unexpected index: %v", wrappedIndex)) - } - - // Keep going until we've got nothing left to do. - for len(p) > 0 { - // Have we run out of wrapped read proxies? - if wrappedIndex == len(mrp.rps) { - err = io.EOF - return - } - - // Read from the wrapped proxy, accumulating into our total before checking - // for a read error. - wrappedN, wrappedErr := mrp.readFromOne(ctx, wrappedIndex, p, off) - n += wrappedN - if wrappedErr != nil { - err = wrappedErr - return - } - - // readFromOne guarantees to either fill our buffer or exhaust the wrapped - // proxy. So advance the buffer, the offset, and the wrapped proxy index - // and go again. - p = p[wrappedN:] - off += int64(wrappedN) - wrappedIndex++ - } - - return -} - -func (mrp *multiReadProxy) Upgrade( - ctx context.Context) (rwl ReadWriteLease, err error) { - // This function is destructive; the user is not allowed to call us again. - mrp.destroyed = true - - // Special case: can we upgrade directly from our initial read lease? 
- if mrp.lease != nil { - rwl, err = mrp.lease.Upgrade() - - // Successful? - if err == nil { - return - } - - // Revoked? - if _, ok := err.(*RevokedError); ok { - mrp.lease = nil - err = nil - } else { - // Propagate other errors - return - } - } - - // Create a new read/write lease to return to the user. Ensure that it is - // destroyed if we return in error. - rwl, err = mrp.leaser.NewFile() - if err != nil { - err = fmt.Errorf("NewFile: %v", err) - return - } - - defer func() { - if err != nil { - rwl.Downgrade().Revoke() - } - }() - - // Accumulate each wrapped read proxy in turn. - for i, entry := range mrp.rps { - err = mrp.upgradeOne(ctx, rwl, entry.rp) - if err != nil { - err = fmt.Errorf("upgradeOne(%d): %v", i, err) - return - } - } - - return -} - -func (mrp *multiReadProxy) Destroy() { - // Destroy all of the wrapped proxies. - for _, entry := range mrp.rps { - entry.rp.Destroy() - } - - // Destroy the lease for the entire contents, if any. - if mrp.lease != nil { - mrp.lease.Revoke() - } - - // Crash early if called again. - mrp.rps = nil - mrp.lease = nil - mrp.destroyed = true -} - -func (mrp *multiReadProxy) CheckInvariants() { - if mrp.destroyed { - panic("Use after destroyed") - } - - // INVARIANT: If len(rps) != 0, rps[0].off == 0 - if len(mrp.rps) != 0 && mrp.rps[0].off != 0 { - panic(fmt.Sprintf("Unexpected starting point: %v", mrp.rps[0].off)) - } - - // INVARIANT: For each x, x.rp.Size() >= 0 - for _, x := range mrp.rps { - if x.rp.Size() < 0 { - panic(fmt.Sprintf("Negative size: %v", x.rp.Size())) - } - } - - // INVARIANT: For each i>0, rps[i].off == rps[i-i].off + rps[i-i].rp.Size() - for i := range mrp.rps { - if i > 0 && !(mrp.rps[i].off == mrp.rps[i-1].off+mrp.rps[i-1].rp.Size()) { - panic("Offsets are not indexed correctly.") - } - } - - // INVARIANT: size is the sum over the wrapped proxy sizes. 
- var sum int64 - for _, wrapped := range mrp.rps { - sum += wrapped.rp.Size() - } - - if sum != mrp.size { - panic(fmt.Sprintf("Size mismatch: %v vs. %v", sum, mrp.size)) - } - - // INVARIANT: If lease != nil, size == lease.Size() - if mrp.lease != nil && mrp.size != mrp.lease.Size() { - panic(fmt.Sprintf("Size mismatch: %v vs. %v", mrp.size, mrp.lease.Size())) - } -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -type readProxyAndOffset struct { - off int64 - rp ReadProxy -} - -// Return the index within mrp.rps of the first read proxy whose logical offset -// is greater than off. If there is none, return len(mrp.rps). -func (mrp *multiReadProxy) upperBound(off int64) (index int) { - pred := func(i int) bool { - return mrp.rps[i].off > off - } - - return sort.Search(len(mrp.rps), pred) -} - -// Serve a read from the wrapped proxy at the given index within our array of -// wrapped proxies. The offset is relative to the start of the multiReadProxy, -// not the wrapped proxy. -// -// Guarantees, letting wrapped be mrp.rps[i].rp and wrappedStart be -// mrp.rps[i].off: -// -// * If err == nil, n == len(p) || off + n == wrappedStart + wrapped.Size(). -// * Never returns err == io.EOF. -// -// REQUIRES: index < len(mrp.rps) -// REQUIRES: mrp.rps[index].off <= off < mrp.rps[index].off + wrapped.Size() -func (mrp *multiReadProxy) readFromOne( - ctx context.Context, - index int, - p []byte, - off int64) (n int, err error) { - // Check input requirements. 
- if !(index < len(mrp.rps)) { - panic(fmt.Sprintf("Out of range wrapped index: %v", index)) - } - - wrapped := mrp.rps[index].rp - wrappedStart := mrp.rps[index].off - wrappedSize := wrapped.Size() - - if !(wrappedStart <= off && off < wrappedStart+wrappedSize) { - panic(fmt.Sprintf( - "Offset %v not in range [%v, %v)", - off, - wrappedStart, - wrappedStart+wrappedSize)) - } - - // Check guarantees on return. - defer func() { - if err == nil && - !(n == len(p) || off+int64(n) == wrappedStart+wrappedSize) { - panic(fmt.Sprintf( - "Failed to serve full read. "+ - "off: %d n: %d, len(p): %d, wrapped start: %d, wrapped size: %d", - off, - n, - len(p), - wrappedStart, - wrappedSize)) - - return - } - - if err == io.EOF { - panic("Unexpected EOF.") - } - }() - - // Read from the wrapped reader, translating the offset. We rely on the - // wrapped reader to properly implement ReadAt, not returning a short read. - wrappedOff := off - wrappedStart - n, err = wrapped.ReadAt(ctx, p, wrappedOff) - - // Sanity check: the wrapped read proxy is supposed to return err == nil only - // if the entire read was satisfied. - if err == nil && n != len(p) { - err = fmt.Errorf( - "Wrapped proxy %d returned only %d bytes for a %d-byte read "+ - "starting at wrapped offset %d", - index, - n, - len(p), - wrappedOff) - - return - } - - // Don't return io.EOF, as guaranteed. - if err == io.EOF { - // Sanity check: if we hit EOF, that should mean that we read up to the end - // of the wrapped range. - if int64(n) != wrappedSize-wrappedOff { - err = fmt.Errorf( - "Wrapped proxy %d returned unexpected EOF. n: %d, wrapped size: %d, "+ - "wrapped offset: %d", - index, - n, - wrappedSize, - wrappedOff) - - return - } - - err = nil - } - - return -} - -// Upgrade the read proxy and copy its contents into the supplied read/write -// lease, then destroy it. -func (mrp *multiReadProxy) upgradeOne( - ctx context.Context, - dst ReadWriteLease, - rp ReadProxy) (err error) { - // Upgrade. 
- src, err := rp.Upgrade(ctx) - if err != nil { - err = fmt.Errorf("Upgrade: %v", err) - return - } - - defer func() { - src.Downgrade().Revoke() - }() - - // Seek to the start and copy. - _, err = src.Seek(0, 0) - if err != nil { - err = fmt.Errorf("Seek: %v", err) - return - } - - _, err = io.Copy(dst, src) - if err != nil { - err = fmt.Errorf("Copy: %v", err) - return - } - - return -} diff --git a/lease/multi_read_proxy_test.go b/lease/multi_read_proxy_test.go deleted file mode 100644 index 0ac6918f7a..0000000000 --- a/lease/multi_read_proxy_test.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease_test - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/googlecloudplatform/gcsfuse/lease" - . "github.com/jacobsa/oglematchers" - . "github.com/jacobsa/ogletest" -) - -func TestMultiReadProxy(t *testing.T) { RunTests(t) } - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -// A ReadProxy that wraps another, calling CheckInvariants before and after -// each action. 
-type checkingReadProxy struct { - Wrapped lease.ReadProxy -} - -func (crp *checkingReadProxy) Size() (size int64) { - crp.Wrapped.CheckInvariants() - defer crp.Wrapped.CheckInvariants() - - size = crp.Wrapped.Size() - return -} - -func (crp *checkingReadProxy) ReadAt( - ctx context.Context, - p []byte, - off int64) (n int, err error) { - crp.Wrapped.CheckInvariants() - defer crp.Wrapped.CheckInvariants() - - n, err = crp.Wrapped.ReadAt(ctx, p, off) - return -} - -func (crp *checkingReadProxy) Upgrade( - ctx context.Context) (rwl lease.ReadWriteLease, err error) { - crp.Wrapped.CheckInvariants() - rwl, err = crp.Wrapped.Upgrade(ctx) - return -} - -func (crp *checkingReadProxy) Destroy() { - crp.Wrapped.CheckInvariants() - crp.Wrapped.Destroy() -} - -func (crp *checkingReadProxy) CheckInvariants() { - crp.Wrapped.CheckInvariants() -} - -// A range to read, the contents we expect to get back, and a matcher for the -// returned error. Special case: a nil matcher means Equals(nil). -type readAtTestCase struct { - start int64 - limit int64 - errMatcher Matcher - expectedContents string -} - -func runReadAtTestCases( - rp lease.ReadProxy, - cases []readAtTestCase) { - for i, tc := range cases { - desc := fmt.Sprintf("Test case %d: [%d, %d)", i, tc.start, tc.limit) - - AssertLe(tc.start, tc.limit) - buf := make([]byte, tc.limit-tc.start) - - n, err := rp.ReadAt(context.Background(), buf, tc.start) - AssertEq(tc.expectedContents, string(buf[:n]), "%s", desc) - - if tc.errMatcher == nil { - AssertEq(nil, err, "%s", desc) - } else { - ExpectThat(err, tc.errMatcher, desc) - } - } -} - -//////////////////////////////////////////////////////////////////////// -// Boilerplate -//////////////////////////////////////////////////////////////////////// - -type MultiReadProxyTest struct { - ctx context.Context - - // Canned content and errors returned by the refreshers. 
- refresherContents []string - refresherErrors []error - - leaser lease.FileLeaser - initialLease lease.ReadLease - proxy *checkingReadProxy -} - -var _ SetUpInterface = &MultiReadProxyTest{} -var _ TearDownInterface = &MultiReadProxyTest{} - -func init() { RegisterTestSuite(&MultiReadProxyTest{}) } - -func (t *MultiReadProxyTest) SetUp(ti *TestInfo) { - t.ctx = ti.Ctx - t.leaser = lease.NewFileLeaser("", math.MaxInt32, math.MaxInt64) - - // Set up default refresher contents and nil errors. - t.refresherContents = []string{ - "taco", - "burrito", - "enchilada", - } - t.refresherErrors = make([]error, len(t.refresherContents)) - - // Create the proxy. - t.resetProxy() -} - -func (t *MultiReadProxyTest) TearDown() { - // Make sure nothing goes crazy. - if t.proxy != nil { - t.proxy.Destroy() - } -} - -// Recreate refreshers using makeRefreshers and reset the proxy to use them and -// t.initialLease. -func (t *MultiReadProxyTest) resetProxy() { - t.proxy = &checkingReadProxy{ - Wrapped: lease.NewMultiReadProxy( - t.leaser, - t.makeRefreshers(), - t.initialLease), - } -} - -// Create refreshers based on the current contents of t.refresherContents. -// t.refresherErrors will be inspected only when Refresh is called. 
-func (t *MultiReadProxyTest) makeRefreshers() (refreshers []lease.Refresher) { - for i := range t.refresherContents { - iCopy := i - contents := t.refresherContents[i] - - r := &funcRefresher{ - N: int64(len(contents)), - F: func(ctx context.Context) (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - err = t.refresherErrors[iCopy] - return - }, - } - - refreshers = append(refreshers, r) - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Tests -//////////////////////////////////////////////////////////////////////// - -func (t *MultiReadProxyTest) SizeZero_NoRefreshers() { - t.refresherContents = []string{} - t.refresherErrors = []error{} - t.resetProxy() - - // Size - ExpectEq(0, t.proxy.Size()) - - // ReadAt - eofMatcher := Equals(io.EOF) - testCases := []readAtTestCase{ - readAtTestCase{0, 0, eofMatcher, ""}, - readAtTestCase{0, 10, eofMatcher, ""}, - readAtTestCase{5, 10, eofMatcher, ""}, - } - - runReadAtTestCases(t.proxy, testCases) -} - -func (t *MultiReadProxyTest) SizeZero_WithRefreshers() { - t.refresherContents = []string{"", "", "", ""} - t.refresherErrors = make([]error, len(t.refresherContents)) - t.resetProxy() - - // Size - ExpectEq(0, t.proxy.Size()) - - // ReadAt - eofMatcher := Equals(io.EOF) - testCases := []readAtTestCase{ - readAtTestCase{0, 0, eofMatcher, ""}, - readAtTestCase{0, 10, eofMatcher, ""}, - readAtTestCase{5, 10, eofMatcher, ""}, - } - - runReadAtTestCases(t.proxy, testCases) -} - -func (t *MultiReadProxyTest) Size() { - var expected int64 - for _, contents := range t.refresherContents { - expected += int64(len(contents)) - } - - ExpectEq(expected, t.proxy.Size()) -} - -func (t *MultiReadProxyTest) ReadAt_NegativeOffset() { - // Test cases. 
- m := Error(HasSubstr("Invalid offset")) - testCases := []readAtTestCase{ - readAtTestCase{-1, 0, m, ""}, - readAtTestCase{-1, 1, m, ""}, - } - - runReadAtTestCases(t.proxy, testCases) -} - -func (t *MultiReadProxyTest) ReadAt_OneRefresherReturnsError() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - AssertEq(4, len(t.refresherContents[0])) - AssertEq(7, len(t.refresherContents[1])) - AssertEq(9, len(t.refresherContents[2])) - - // Configure an error for the middle read lease. - someErr := errors.New("foobar") - t.refresherErrors[1] = someErr - - // Test cases. - someErrMatcher := Error(HasSubstr(someErr.Error())) - eofMatcher := Equals(io.EOF) - - testCases := []readAtTestCase{ - // First read lease only. - readAtTestCase{0, 0, nil, ""}, - readAtTestCase{0, 1, nil, "t"}, - readAtTestCase{0, 4, nil, "taco"}, - readAtTestCase{1, 4, nil, "aco"}, - readAtTestCase{4, 4, nil, ""}, - - // First and second read leases. - readAtTestCase{0, 5, someErrMatcher, "taco"}, - readAtTestCase{1, 11, someErrMatcher, "aco"}, - - // All read leases. - readAtTestCase{0, 20, someErrMatcher, "taco"}, - readAtTestCase{1, 20, someErrMatcher, "aco"}, - readAtTestCase{1, 100, someErrMatcher, "aco"}, - - // Second read lease only. - readAtTestCase{4, 4, nil, ""}, - readAtTestCase{4, 5, someErrMatcher, ""}, - readAtTestCase{4, 11, someErrMatcher, ""}, - - // Second and third read leases. - readAtTestCase{4, 12, someErrMatcher, ""}, - readAtTestCase{4, 20, someErrMatcher, ""}, - readAtTestCase{5, 100, someErrMatcher, ""}, - - // Third read lease only. - readAtTestCase{11, 20, nil, "enchilada"}, - readAtTestCase{11, 100, eofMatcher, "enchilada"}, - readAtTestCase{12, 20, nil, "nchilada"}, - readAtTestCase{19, 20, nil, "a"}, - readAtTestCase{20, 20, eofMatcher, ""}, - - // Past end. 
- readAtTestCase{21, 21, eofMatcher, ""}, - readAtTestCase{21, 22, eofMatcher, ""}, - readAtTestCase{21, 100, eofMatcher, ""}, - readAtTestCase{100, 1000, eofMatcher, ""}, - } - - runReadAtTestCases(t.proxy, testCases) -} - -func (t *MultiReadProxyTest) ReadAt_AllSuccessful() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - AssertEq(4, len(t.refresherContents[0])) - AssertEq(7, len(t.refresherContents[1])) - AssertEq(9, len(t.refresherContents[2])) - - // Test cases. - eofMatcher := Equals(io.EOF) - testCases := []readAtTestCase{ - // First read lease only. - readAtTestCase{0, 0, nil, ""}, - readAtTestCase{0, 1, nil, "t"}, - readAtTestCase{0, 4, nil, "taco"}, - readAtTestCase{1, 4, nil, "aco"}, - readAtTestCase{4, 4, nil, ""}, - - // First and second read leases. - readAtTestCase{0, 5, nil, "tacob"}, - readAtTestCase{1, 11, nil, "acoburrito"}, - - // All read leases. - readAtTestCase{0, 20, nil, "tacoburritoenchilada"}, - readAtTestCase{1, 19, nil, "acoburritoenchilad"}, - readAtTestCase{3, 17, nil, "oburritoenchil"}, - - // Second read lease only. - readAtTestCase{4, 4, nil, ""}, - readAtTestCase{4, 5, nil, "b"}, - readAtTestCase{4, 11, nil, "burrito"}, - - // Second and third read leases. - readAtTestCase{4, 12, nil, "burritoe"}, - readAtTestCase{4, 20, nil, "burritoenchilada"}, - readAtTestCase{5, 100, eofMatcher, "urritoenchilada"}, - - // Third read lease only. - readAtTestCase{11, 20, nil, "enchilada"}, - readAtTestCase{11, 100, eofMatcher, "enchilada"}, - readAtTestCase{12, 20, nil, "nchilada"}, - readAtTestCase{19, 20, nil, "a"}, - readAtTestCase{20, 20, eofMatcher, ""}, - - // Past end. 
- readAtTestCase{21, 21, eofMatcher, ""}, - readAtTestCase{21, 22, eofMatcher, ""}, - readAtTestCase{21, 100, eofMatcher, ""}, - readAtTestCase{100, 1000, eofMatcher, ""}, - } - - runReadAtTestCases(t.proxy, testCases) -} - -func (t *MultiReadProxyTest) ReadAt_ContentAlreadyCached() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - // Read the entire contents, causing read leases to be issued for each - // sub-proxy. - buf := make([]byte, 1024) - n, err := t.proxy.ReadAt(context.Background(), buf, 0) - - AssertThat(err, AnyOf(nil, io.EOF)) - AssertEq("tacoburritoenchilada", string(buf[:n])) - - // Set up all refreshers to return errors when invoked. - for i, _ := range t.refresherErrors { - t.refresherErrors[i] = errors.New("foo") - } - - // Despite this, the content should still be available. - n, err = t.proxy.ReadAt(context.Background(), buf, 0) - - AssertThat(err, AnyOf(nil, io.EOF)) - AssertEq("tacoburritoenchilada", string(buf[:n])) -} - -func (t *MultiReadProxyTest) Upgrade_OneRefresherReturnsError() { - AssertEq(3, len(t.refresherErrors)) - - // Configure an error for the middle read lease. - t.refresherErrors[1] = errors.New("foobar") - - // Upgrade - _, err := t.proxy.Upgrade(context.Background()) - t.proxy = nil - - ExpectThat(err, Error(HasSubstr("foobar"))) -} - -func (t *MultiReadProxyTest) Upgrade_AllSuccessful() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - // Upgrade - rwl, err := t.proxy.Upgrade(context.Background()) - t.proxy = nil - AssertEq(nil, err) - - defer func() { rwl.Downgrade().Revoke() }() - - // Check the contents of the read/write lease. 
- _, err = rwl.Seek(0, 0) - AssertEq(nil, err) - - contents, err := ioutil.ReadAll(rwl) - AssertEq(nil, err) - ExpectEq("tacoburritoenchilada", string(contents)) -} - -func (t *MultiReadProxyTest) Upgrade_ContentAlreadyCached() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - // Read the entire contents, causing read leases to be issued for each - // sub-proxy. - buf := make([]byte, 1024) - n, err := t.proxy.ReadAt(context.Background(), buf, 0) - - AssertThat(err, AnyOf(nil, io.EOF)) - AssertEq("tacoburritoenchilada", string(buf[:n])) - - // Set up all refreshers to return errors when invoked. - for i, _ := range t.refresherErrors { - t.refresherErrors[i] = errors.New("foo") - } - - // Despite this, the content should still be available. - rwl, err := t.proxy.Upgrade(context.Background()) - t.proxy = nil - AssertEq(nil, err) - - defer func() { rwl.Downgrade().Revoke() }() - - _, err = rwl.Seek(0, 0) - AssertEq(nil, err) - - contents, err := ioutil.ReadAll(rwl) - AssertEq(nil, err) - ExpectEq("tacoburritoenchilada", string(contents)) -} - -func (t *MultiReadProxyTest) InitialReadLeaseValid() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - // Set up an initial read lease. - rwl, err := t.leaser.NewFile() - AssertEq(nil, err) - - _, err = rwl.Write([]byte("tacoburritoenchilada")) - AssertEq(nil, err) - - t.initialLease = rwl.Downgrade() - rwl = nil - - // Recreate the proxy using that lease. - t.resetProxy() - - // Set up all refreshers to return errors when invoked. - for i, _ := range t.refresherErrors { - t.refresherErrors[i] = errors.New("foo") - } - - // Despite this, the content should still be available for reading. - buf := make([]byte, 1024) - n, err := t.proxy.ReadAt(context.Background(), buf, 0) - - AssertThat(err, AnyOf(nil, io.EOF)) - AssertEq("tacoburritoenchilada", string(buf[:n])) - - // And for upgrading. 
- rwl, err = t.proxy.Upgrade(context.Background()) - t.proxy = nil - AssertEq(nil, err) - - defer func() { rwl.Downgrade().Revoke() }() - - _, err = rwl.Seek(0, 0) - AssertEq(nil, err) - - contents, err := ioutil.ReadAll(rwl) - AssertEq(nil, err) - ExpectEq("tacoburritoenchilada", string(contents)) -} - -func (t *MultiReadProxyTest) InitialReadLeaseRevoked() { - AssertThat( - t.refresherContents, - ElementsAre( - "taco", - "burrito", - "enchilada", - )) - - // Set up an initial read lease with the correct length that has been revoked. - rwl, err := t.leaser.NewFile() - AssertEq(nil, err) - - _, err = rwl.Write([]byte("tacoburritoenchilada")) - AssertEq(nil, err) - - t.initialLease = rwl.Downgrade() - rwl = nil - - t.leaser.RevokeReadLeases() - - // Recreate the proxy using that lease. - t.resetProxy() - - // Set up all refreshers to return errors when invoked. - for i, _ := range t.refresherErrors { - t.refresherErrors[i] = errors.New("foobar") - } - - // Reading should fall through to the refreshers, and fail. - buf := make([]byte, 1024) - _, err = t.proxy.ReadAt(context.Background(), buf, 0) - - ExpectThat(err, Error(HasSubstr("foobar"))) - - // Ditto upgrading. - _, err = t.proxy.Upgrade(context.Background()) - t.proxy = nil - - ExpectThat(err, Error(HasSubstr("foobar"))) -} diff --git a/lease/read_lease.go b/lease/read_lease.go deleted file mode 100644 index 43d665af89..0000000000 --- a/lease/read_lease.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "io" - "os" - "sync" -) - -// A sentinel error used when a lease has been revoked. -type RevokedError struct { -} - -func (re *RevokedError) Error() string { - return "Lease revoked" -} - -// A read-only wrapper around a file that may be revoked, when e.g. there is -// temporary disk space pressure. A read lease may also be upgraded to a write -// lease, if it is still valid. -// -// All methods are safe for concurrent access. -type ReadLease interface { - io.ReadSeeker - io.ReaderAt - - // Return the size of the underlying file, or what the size used to be if the - // lease has been revoked. - Size() (size int64) - - // Has the lease been revoked? Note that this is completely racy in the - // absence of external synchronization on all leases and the file leaser, so - // is suitable only for testing purposes. - Revoked() (revoked bool) - - // Attempt to upgrade the lease to a read/write lease. After successfully - // upgrading, it is as if the lease has been revoked. - Upgrade() (rwl ReadWriteLease, err error) - - // Cause the lease to be revoked and any associated resources to be cleaned - // up, if it has not already been revoked. - Revoke() -} - -type readLease struct { - // Used internally and by fileLeaser eviction logic. - Mu sync.Mutex - - ///////////////////////// - // Constant data - ///////////////////////// - - size int64 - - ///////////////////////// - // Dependencies - ///////////////////////// - - // The leaser that issued this lease. - leaser *fileLeaser - - // The underlying file, set to nil once revoked. 
- // - // GUARDED_BY(Mu) - file *os.File -} - -var _ ReadLease = &readLease{} - -func newReadLease( - size int64, - leaser *fileLeaser, - file *os.File) (rl *readLease) { - rl = &readLease{ - size: size, - leaser: leaser, - file: file, - } - - return -} - -//////////////////////////////////////////////////////////////////////// -// Public interface -//////////////////////////////////////////////////////////////////////// - -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) Read(p []byte) (n int, err error) { - rl.leaser.promoteToMostRecent(rl) - - rl.Mu.Lock() - defer rl.Mu.Unlock() - - // Have we been revoked? - if rl.revoked() { - err = &RevokedError{} - return - } - - n, err = rl.file.Read(p) - return -} - -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) Seek( - offset int64, - whence int) (off int64, err error) { - rl.Mu.Lock() - defer rl.Mu.Unlock() - - // Have we been revoked? - if rl.revoked() { - err = &RevokedError{} - return - } - - off, err = rl.file.Seek(offset, whence) - return -} - -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) ReadAt(p []byte, off int64) (n int, err error) { - rl.leaser.promoteToMostRecent(rl) - - rl.Mu.Lock() - defer rl.Mu.Unlock() - - // Have we been revoked? - if rl.revoked() { - err = &RevokedError{} - return - } - - n, err = rl.file.ReadAt(p, off) - return -} - -// No lock necessary. -func (rl *readLease) Size() (size int64) { - size = rl.size - return -} - -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) Revoked() (revoked bool) { - rl.Mu.Lock() - defer rl.Mu.Unlock() - - revoked = rl.revoked() - return -} - -// LOCKS_EXCLUDED(rl.leaser.mu) -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) Upgrade() (rwl ReadWriteLease, err error) { - // Let the leaser do the heavy lifting. - rwl, err = rl.leaser.upgrade(rl) - return -} - -// LOCKS_EXCLUDED(rl.leaser.mu) -// LOCKS_EXCLUDED(rl.Mu) -func (rl *readLease) Revoke() { - // Let the leaser do the heavy lifting. 
- rl.leaser.revokeVoluntarily(rl) -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -// Has the lease been revoked? -// -// LOCKS_REQUIRED(rl.Mu || rl.leaser.mu) -func (rl *readLease) revoked() bool { - return rl.file == nil -} - -// Relinquish control of the file, marking the lease as revoked. -// -// REQUIRES: Not yet revoked. -// -// LOCKS_REQUIRED(rl.Mu) -func (rl *readLease) release() (file *os.File) { - if rl.revoked() { - panic("Already revoked") - } - - file = rl.file - rl.file = nil - - return -} diff --git a/lease/read_proxy.go b/lease/read_proxy.go deleted file mode 100644 index 9f58b5f818..0000000000 --- a/lease/read_proxy.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "fmt" - "io" - - "golang.org/x/net/context" -) - -// A type used by read proxies to refresh their contents. See notes on -// NewReadProxy. -type Refresher interface { - // Return the size of the underlying contents. - Size() (size int64) - - // Return a read-closer for the contents. The same contents will always be - // returned, and they will always be of length Size(). 
- Refresh(ctx context.Context) (rc io.ReadCloser, err error) -} - -// A wrapper around a read lease, exposing a similar interface with the -// following differences: -// -// * Contents are fetched and re-fetched automatically when needed. Therefore -// the user need not worry about lease expiration. -// -// * Methods that may involve fetching the contents (reading, seeking) accept -// context arguments, so as to be cancellable. -// -// * Only random access reading is supported. -// -// External synchronization is required. -type ReadProxy interface { - // Return the size of the proxied content. Guarantees to not block. - Size() (size int64) - - // Semantics matching io.ReaderAt, except with context support and without - // the guarantee of being thread-safe. - ReadAt(ctx context.Context, p []byte, off int64) (n int, err error) - - // Return a read/write lease for the proxied contents, destroying the read - // proxy. The read proxy must not be used after calling this method. - Upgrade(ctx context.Context) (rwl ReadWriteLease, err error) - - // Destroy any resources in use by the read proxy. It must not be used - // further. - Destroy() - - // Panic if any internal invariants are violated. - CheckInvariants() -} - -// Create a read proxy. -// -// The supplied refresher will be used to obtain the proxy's contents whenever -// the file leaser decides to expire the temporary copy thus obtained. -// -// If rl is non-nil, it will be used as the first temporary copy of the -// contents, and must match what the refresher returns. -func NewReadProxy( - fl FileLeaser, - r Refresher, - rl ReadLease) (rp ReadProxy) { - rp = &readProxy{ - size: r.Size(), - leaser: fl, - refresher: r, - lease: rl, - } - - return -} - -// A wrapper around a read lease, exposing a similar interface with the -// following differences: -// -// * Contents are fetched and re-fetched automatically when needed. Therefore -// the user need not worry about lease expiration. 
-// -// * Methods that may involve fetching the contents (reading, seeking) accept -// context arguments, so as to be cancellable. -// -// External synchronization is required. -type readProxy struct { - ///////////////////////// - // Constant data - ///////////////////////// - - size int64 - - ///////////////////////// - // Dependencies - ///////////////////////// - - leaser FileLeaser - refresher Refresher - - ///////////////////////// - // Mutable state - ///////////////////////// - - // The current wrapped lease, or nil if one has never been issued. - lease ReadLease -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func isRevokedErr(err error) bool { - _, ok := err.(*RevokedError) - return ok -} - -// Set up a read/write lease and fill in our contents. -// -// REQUIRES: The caller has observed that rp.lease has expired. -func (rp *readProxy) getContents( - ctx context.Context) (rwl ReadWriteLease, err error) { - // Obtain some space to write the contents. - rwl, err = rp.leaser.NewFile() - if err != nil { - err = fmt.Errorf("NewFile: %v", err) - return - } - - // Clean up if we exit early. - defer func() { - if err != nil { - rwl.Downgrade().Revoke() - } - }() - - // Obtain the reader for our contents. - rc, err := rp.refresher.Refresh(ctx) - if err != nil { - err = fmt.Errorf("User function: %v", err) - return - } - - defer func() { - closeErr := rc.Close() - if closeErr != nil && err == nil { - err = fmt.Errorf("Close: %v", closeErr) - } - }() - - // Copy into the read/write lease. Use a large buffer to avoid spending a lot - // of CPU time on syscalls. - buf := make([]byte, 1<<20) - copied, err := io.CopyBuffer(rwl, rc, buf) - if err != nil { - err = fmt.Errorf("Copy: %v", err) - return - } - - // Did the user lie about the size? 
- if copied != rp.Size() { - err = fmt.Errorf("Copied %v bytes; expected %v", copied, rp.Size()) - return - } - - return -} - -// Downgrade and save the supplied read/write lease obtained with getContents -// for later use. -func (rp *readProxy) saveContents(rwl ReadWriteLease) { - rp.lease = rwl.Downgrade() -} - -//////////////////////////////////////////////////////////////////////// -// Public interface -//////////////////////////////////////////////////////////////////////// - -func (rp *readProxy) CheckInvariants() { -} - -// Semantics matching io.ReaderAt, except with context support. -func (rp *readProxy) ReadAt( - ctx context.Context, - p []byte, - off int64) (n int, err error) { - // Common case: is the existing lease still valid? - if rp.lease != nil { - n, err = rp.lease.ReadAt(p, off) - if !isRevokedErr(err) { - return - } - - // Clear the revoked error. - err = nil - } - - // Get hold of a read/write lease containing our contents. - rwl, err := rp.getContents(ctx) - if err != nil { - err = fmt.Errorf("getContents: %v", err) - return - } - - defer rp.saveContents(rwl) - - // Serve from the read/write lease. - n, err = rwl.ReadAt(p, off) - - return -} - -// Return the size of the proxied content. Guarantees to not block. -func (rp *readProxy) Size() (size int64) { - size = rp.size - return -} - -// Return a read/write lease for the proxied contents, destroying the read -// proxy. The read proxy must not be used after calling this method. -func (rp *readProxy) Upgrade( - ctx context.Context) (rwl ReadWriteLease, err error) { - // If we succeed, we are now destroyed. - defer func() { - if err == nil { - rp.Destroy() - } - }() - - // Common case: is the existing lease still valid? - if rp.lease != nil { - rwl, err = rp.lease.Upgrade() - if !isRevokedErr(err) { - return - } - - // Clear the revoked error. - err = nil - } - - // Build the read/write lease anew. 
- rwl, err = rp.getContents(ctx) - if err != nil { - err = fmt.Errorf("getContents: %v", err) - return - } - - return -} - -// Destroy any resources in use by the read proxy. It must not be used further. -func (rp *readProxy) Destroy() { - if rp.lease != nil { - rp.lease.Revoke() - } - - // Make use-after-destroy errors obvious. - rp.size = 0 - rp.leaser = nil - rp.refresher = nil - rp.lease = nil -} diff --git a/lease/read_proxy_test.go b/lease/read_proxy_test.go deleted file mode 100644 index 1503d2e883..0000000000 --- a/lease/read_proxy_test.go +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease_test - -import ( - "errors" - "io" - "io/ioutil" - "strings" - "testing" - "testing/iotest" - - "golang.org/x/net/context" - - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/lease/mock_lease" - . "github.com/jacobsa/oglematchers" - . "github.com/jacobsa/oglemock" - . "github.com/jacobsa/ogletest" -) - -func TestReadProxy(t *testing.T) { RunTests(t) } - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -const contents = "taco" - -// A function that always successfully returns our contents constant. 
-func returnContents() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return -} - -func successfulWrite(p []byte) (n int, err error) { - n = len(p) - return -} - -// A ReadCloser that returns the supplied error when closing. -type closeErrorReader struct { - Wrapped io.Reader - Err error -} - -func (rc *closeErrorReader) Read(p []byte) (n int, err error) { - n, err = rc.Wrapped.Read(p) - return -} - -func (rc *closeErrorReader) Close() (err error) { - err = rc.Err - return -} - -// A refresher that defers to a function. -type funcRefresher struct { - N int64 - F func(context.Context) (io.ReadCloser, error) -} - -func (r *funcRefresher) Size() (size int64) { - return r.N -} - -func (r *funcRefresher) Refresh( - ctx context.Context) (rc io.ReadCloser, err error) { - rc, err = r.F(ctx) - return -} - -//////////////////////////////////////////////////////////////////////// -// Boilerplate -//////////////////////////////////////////////////////////////////////// - -type ReadProxyTest struct { - // A function that will be invoked for each call to the refresher given to - // NewReadProxy. - f func() (io.ReadCloser, error) - - mockController Controller - leaser mock_lease.MockFileLeaser - proxy lease.ReadProxy -} - -var _ SetUpInterface = &ReadProxyTest{} - -func init() { RegisterTestSuite(&ReadProxyTest{}) } - -func (t *ReadProxyTest) SetUp(ti *TestInfo) { - t.mockController = ti.MockController - - // Set up the leaser. - t.leaser = mock_lease.NewMockFileLeaser(ti.MockController, "leaser") - - // Set up the lease. - t.proxy = lease.NewReadProxy( - t.leaser, - t.makeRefresher(), - nil) -} - -func (t *ReadProxyTest) makeRefresher() (r lease.Refresher) { - r = &funcRefresher{ - N: int64(len(contents)), - F: t.callF, - } - - return -} - -// Defer to whatever is currently set as t.f. 
-func (t *ReadProxyTest) callF(ctx context.Context) (io.ReadCloser, error) { - AssertNe(nil, t.f) - return t.f() -} - -//////////////////////////////////////////////////////////////////////// -// Tests -//////////////////////////////////////////////////////////////////////// - -func (t *ReadProxyTest) Size() { - ExpectEq(len(contents), t.proxy.Size()) -} - -func (t *ReadProxyTest) LeaserReturnsError() { - var err error - - // NewFile - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(nil, errors.New("taco"))) - - // Attempt to read. - _, err = t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) CallsFunc() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - var called bool - t.f = func() (rc io.ReadCloser, err error) { - AssertFalse(called) - called = true - - err = errors.New("") - return - } - - // Attempt to read. - t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectTrue(called) -} - -func (t *ReadProxyTest) FuncReturnsError() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - err = errors.New("taco") - return - } - - // Attempt to read. 
- _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) ContentsReturnReadError() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser( - iotest.TimeoutReader( - iotest.OneByteReader( - strings.NewReader(contents)))) - - return - } - - // Attempt to read. - _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("Copy"))) - ExpectThat(err, Error(HasSubstr("timeout"))) -} - -func (t *ReadProxyTest) ContentsReturnCloseError() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = &closeErrorReader{ - Wrapped: strings.NewReader(contents), - Err: errors.New("taco"), - } - - return - } - - // Attempt to read. - _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("Close"))) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) ContentsAreWrongLength() { - AssertEq(4, len(contents)) - - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). 
- WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents[:3])) - return - } - - // Attempt to read. - _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("Copied 3"))) - ExpectThat(err, Error(HasSubstr("expected 4"))) -} - -func (t *ReadProxyTest) WritesCorrectData() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - var written []byte - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(func(p []byte) (n int, err error) { - written = append(written, p...) - n = len(p) - return - })) - - // Read - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillRepeatedly(Return(0, errors.New(""))) - - // Downgrade - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. - t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectEq(contents, string(written)) -} - -func (t *ReadProxyTest) WriteError() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). 
- WillOnce(Return(0, errors.New("taco"))) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Attempt to read. - _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectThat(err, Error(HasSubstr("Copy"))) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) ReadAt_CallsWrapped() { - const offset = 17 - - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // ReadAt - ExpectCall(rwl, "ReadAt")(Any(), 17). - WillOnce(Return(0, errors.New(""))) - - // Downgrade - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. - t.proxy.ReadAt(context.Background(), []byte{}, offset) -} - -func (t *ReadProxyTest) ReadAt_Error() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // ReadAt - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, errors.New("taco"))) - - // Downgrade - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. 
- _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) ReadAt_Successful() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // ReadAt - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, nil)) - - // Downgrade - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. - _, err := t.proxy.ReadAt(context.Background(), []byte{}, 0) - ExpectEq(nil, err) -} - -func (t *ReadProxyTest) Upgrade_Error() { - // NewFile - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - // Write - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Return(0, errors.New("taco"))) - - // Downgrade and Revoke - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - ExpectCall(rl, "Revoke")() - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. - _, err := t.proxy.Upgrade(context.Background()) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *ReadProxyTest) Upgrade_Successful() { - // NewFile - expected := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(expected, nil)) - - // Write - ExpectCall(expected, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - // Function - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - // Call. 
- rwl, err := t.proxy.Upgrade(context.Background()) - AssertEq(nil, err) - ExpectEq(expected, rwl) -} - -func (t *ReadProxyTest) WrappedRevoked() { - // Arrange a successful wrapped read lease. - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, errors.New("taco"))) - - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - t.proxy.ReadAt(context.Background(), []byte{}, 0) - - // Simulate it being revoked for all methods. - ExpectCall(rl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, &lease.RevokedError{})) - - ExpectCall(rl, "Upgrade")(). - WillOnce(Return(nil, &lease.RevokedError{})) - - ExpectCall(t.leaser, "NewFile")(). - Times(2). - WillRepeatedly(Return(nil, errors.New(""))) - - t.proxy.ReadAt(context.Background(), []byte{}, 0) - t.proxy.Upgrade(context.Background()) -} - -func (t *ReadProxyTest) WrappedStillValid() { - var err error - - // Arrange a successful wrapped read lease. - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, errors.New("taco"))) - - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - t.proxy.ReadAt(context.Background(), []byte{}, 0) - - // ReadAt - ExpectCall(rl, "ReadAt")(Any(), 11). - WillOnce(Return(0, errors.New("taco"))). 
- WillOnce(Return(17, nil)) - - _, err = t.proxy.ReadAt(context.Background(), []byte{}, 11) - ExpectThat(err, Error(HasSubstr("taco"))) - - n, err := t.proxy.ReadAt(context.Background(), []byte{}, 11) - ExpectEq(17, n) - - // Upgrade - ExpectCall(rl, "Revoke")() - ExpectCall(rl, "Upgrade")(). - WillOnce(Return(nil, errors.New("taco"))). - WillOnce(Return(rwl, nil)) - - _, err = t.proxy.Upgrade(context.Background()) - ExpectThat(err, Error(HasSubstr("taco"))) - - tmp, _ := t.proxy.Upgrade(context.Background()) - ExpectEq(rwl, tmp) -} - -func (t *ReadProxyTest) InitialReadLease_Revoked() { - // Set up an initial lease. - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - t.proxy = lease.NewReadProxy( - t.leaser, - t.makeRefresher(), - rl) - - // Simulate it being revoked for all methods. - ExpectCall(rl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, &lease.RevokedError{})) - - ExpectCall(rl, "Upgrade")(). - WillOnce(Return(nil, &lease.RevokedError{})) - - ExpectCall(t.leaser, "NewFile")(). - Times(2). - WillRepeatedly(Return(nil, errors.New(""))) - - t.proxy.ReadAt(context.Background(), []byte{}, 0) - t.proxy.Upgrade(context.Background()) -} - -func (t *ReadProxyTest) InitialReadLease_Valid() { - var err error - - // Set up an initial lease. - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - t.proxy = lease.NewReadProxy( - t.leaser, - t.makeRefresher(), - rl) - - // ReadAt - ExpectCall(rl, "ReadAt")(Any(), 11). - WillOnce(Return(0, errors.New("taco"))). - WillOnce(Return(17, nil)) - - _, err = t.proxy.ReadAt(context.Background(), []byte{}, 11) - ExpectThat(err, Error(HasSubstr("taco"))) - - n, err := t.proxy.ReadAt(context.Background(), []byte{}, 11) - ExpectEq(17, n) - - // Upgrade - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - - ExpectCall(rl, "Revoke")() - ExpectCall(rl, "Upgrade")(). - WillOnce(Return(nil, errors.New("taco"))). 
- WillOnce(Return(rwl, nil)) - - _, err = t.proxy.Upgrade(context.Background()) - ExpectThat(err, Error(HasSubstr("taco"))) - - tmp, _ := t.proxy.Upgrade(context.Background()) - ExpectEq(rwl, tmp) -} - -func (t *ReadProxyTest) Destroy() { - // Arrange a successful wrapped read lease. - rwl := mock_lease.NewMockReadWriteLease(t.mockController, "rwl") - ExpectCall(t.leaser, "NewFile")(). - WillOnce(Return(rwl, nil)) - - ExpectCall(rwl, "Write")(Any()). - WillRepeatedly(Invoke(successfulWrite)) - - ExpectCall(rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(0, errors.New("taco"))) - - rl := mock_lease.NewMockReadLease(t.mockController, "rl") - ExpectCall(rwl, "Downgrade")().WillOnce(Return(rl)) - - t.f = func() (rc io.ReadCloser, err error) { - rc = ioutil.NopCloser(strings.NewReader(contents)) - return - } - - t.proxy.ReadAt(context.Background(), []byte{}, 0) - - // When we destroy our lease, the wrapped should be revoked. - ExpectCall(rl, "Revoke")() - t.proxy.Destroy() -} diff --git a/lease/read_write_lease.go b/lease/read_write_lease.go deleted file mode 100644 index 611294a56f..0000000000 --- a/lease/read_write_lease.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lease - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/jacobsa/syncutil" -) - -// A read-write wrapper around a file. Unlike a read lease, this cannot be -// revoked. 
-// -// All methods are safe for concurrent access. -type ReadWriteLease interface { - // Methods with semantics matching *os.File. - io.ReadWriteSeeker - io.ReaderAt - io.WriterAt - Truncate(size int64) (err error) - - // Return the current size of the underlying file. - Size() (size int64, err error) - - // Downgrade to a read lease, releasing any resources pinned by this lease to - // the pool that may be revoked, as with any read lease. After downgrading, - // this lease must not be used again. - Downgrade() (rl ReadLease) -} - -type readWriteLease struct { - mu syncutil.InvariantMutex - - ///////////////////////// - // Dependencies - ///////////////////////// - - // The leaser that issued this lease. - leaser *fileLeaser - - // The underlying file, set to nil once downgraded. - // - // GUARDED_BY(mu) - file *os.File - - ///////////////////////// - // Mutable state - ///////////////////////// - - // The cumulative number of bytes we have reported to the leaser using - // fileLeaser.addReadWriteByteDelta. When the size changes, we report the - // difference between the new size and this value. - // - // GUARDED_BY(mu) - reportedSize int64 - - // Our current view of the file's size, or a negative value if we dirtied the - // file but then failed to find its size. - // - // INVARIANT: If fileSize >= 0, fileSize agrees with file.Stat() - // INVARIANT: fileSize < 0 || fileSize == reportedSize - // - // GUARDED_BY(mu) - fileSize int64 -} - -var _ ReadWriteLease = &readWriteLease{} - -// size is the size that the leaser has already recorded for us. It must match -// the file's size. 
-func newReadWriteLease( - leaser *fileLeaser, - size int64, - file *os.File) (rwl *readWriteLease) { - rwl = &readWriteLease{ - leaser: leaser, - file: file, - reportedSize: size, - fileSize: size, - } - - rwl.mu = syncutil.NewInvariantMutex(rwl.checkInvariants) - - return -} - -//////////////////////////////////////////////////////////////////////// -// Public interface -//////////////////////////////////////////////////////////////////////// - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Read(p []byte) (n int, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - n, err = rwl.file.Read(p) - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Write(p []byte) (n int, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - // Ensure that we reconcile our size when we're done. - defer rwl.reconcileSize() - - // Call through. - n, err = rwl.file.Write(p) - - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Seek( - offset int64, - whence int) (off int64, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - off, err = rwl.file.Seek(offset, whence) - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) ReadAt(p []byte, off int64) (n int, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - n, err = rwl.file.ReadAt(p, off) - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) WriteAt(p []byte, off int64) (n int, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - // Ensure that we reconcile our size when we're done. - defer rwl.reconcileSize() - - // Call through. - n, err = rwl.file.WriteAt(p, off) - - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Truncate(size int64) (err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - // Ensure that we reconcile our size when we're done. - defer rwl.reconcileSize() - - // Call through. 
- err = rwl.file.Truncate(size) - - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Size() (size int64, err error) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - size, err = rwl.sizeLocked() - return -} - -// LOCKS_EXCLUDED(rwl.mu) -func (rwl *readWriteLease) Downgrade() (rl ReadLease) { - rwl.mu.Lock() - defer rwl.mu.Unlock() - - // Ensure that we will crash if used again. - if rwl.leaser == nil { - panic("Nil leaser; already downgraded?") - } - - defer func() { - rwl.leaser = nil - rwl.file = nil - }() - - // Special case: if we don't know the file's current size, we can't reliably - // create a read lease wrapping the file, since we might be lying about its - // size. - // - // In this case, call through to the leaser as normal so it can update its - // bookkeeping, but discard its result in favor of a lease that ostensibly - // has the right size but whose contents cannot be read. - if rwl.fileSize < 0 { - rwl.leaser.downgrade(rwl.reportedSize, rwl.file) - rl = &alwaysRevokedReadLease{size: rwl.reportedSize} - return - } - - // Otherwise, just call through to the leaser. - rl = rwl.leaser.downgrade(rwl.fileSize, rwl.file) - - return -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -// LOCKS_REQUIRED(rwl.mu) -func (rwl *readWriteLease) checkInvariants() { - // Have we been dowgraded? - if rwl.file == nil { - return - } - - // INVARIANT: If fileSize >= 0, fileSize agrees with file.Stat() - if rwl.fileSize >= 0 { - fi, err := rwl.file.Stat() - if err != nil { - panic(fmt.Sprintf("Failed to stat file: %v", err)) - } - - if rwl.fileSize != fi.Size() { - panic(fmt.Sprintf("Size mismatch: %v vs. %v", rwl.fileSize, fi.Size())) - } - } - - // INVARIANT: fileSize < 0 || fileSize == reportedSize - if !(rwl.fileSize < 0 || rwl.fileSize == rwl.reportedSize) { - panic(fmt.Sprintf("Size mismatch: %v vs. 
%v", rwl.fileSize, rwl.reportedSize)) - } -} - -// LOCKS_REQUIRED(rwl.mu) -func (rwl *readWriteLease) sizeLocked() (size int64, err error) { - // Stat the file to get its size. - fi, err := rwl.file.Stat() - if err != nil { - err = fmt.Errorf("Stat: %v", err) - return - } - - size = fi.Size() - return -} - -// Notify the leaser if our size has changed. Log errors when we fail to find -// our size. -// -// LOCKS_REQUIRED(rwl.mu) -// LOCKS_EXCLUDED(rwl.leaser.mu) -func (rwl *readWriteLease) reconcileSize() { - var err error - - // If we fail to find the size, we must note that this happened. - defer func() { - if err != nil { - rwl.fileSize = -1 - } - }() - - // Find our size. - size, err := rwl.sizeLocked() - if err != nil { - log.Println("Error getting size for reconciliation:", err) - return - } - - // Let the leaser know about any change. - delta := size - rwl.reportedSize - if delta != 0 { - rwl.leaser.addReadWriteByteDelta(delta) - rwl.reportedSize = size - } - - // Update our view of the file's size. 
- rwl.fileSize = size -} - -//////////////////////////////////////////////////////////////////////// -// alwaysRevokedReadLease -//////////////////////////////////////////////////////////////////////// - -type alwaysRevokedReadLease struct { - size int64 -} - -func (rl *alwaysRevokedReadLease) Read(p []byte) (n int, err error) { - err = &RevokedError{} - return -} - -func (rl *alwaysRevokedReadLease) Seek( - offset int64, - whence int) (off int64, err error) { - err = &RevokedError{} - return -} - -func (rl *alwaysRevokedReadLease) ReadAt( - p []byte, off int64) (n int, err error) { - err = &RevokedError{} - return -} - -func (rl *alwaysRevokedReadLease) Size() (size int64) { - size = rl.size - return -} - -func (rl *alwaysRevokedReadLease) Revoked() (revoked bool) { - revoked = true - return -} - -func (rl *alwaysRevokedReadLease) Upgrade() (rwl ReadWriteLease, err error) { - err = &RevokedError{} - return -} - -func (rl *alwaysRevokedReadLease) Revoke() { -} diff --git a/mount.go b/mount.go index 567d555ff7..63184cf785 100644 --- a/mount.go +++ b/mount.go @@ -20,7 +20,6 @@ import ( "os" "golang.org/x/net/context" - "golang.org/x/sys/unix" "github.com/googlecloudplatform/gcsfuse/fs" "github.com/googlecloudplatform/gcsfuse/perms" @@ -55,23 +54,6 @@ func mount( } } - // The file leaser used by the file system sizes its limit on number of - // temporary files based on the process's rlimit. If this is too low, we'll - // throw away cached content unnecessarily often. This is particularly a - // problem on OS X, which has a crazy low default limit (256 as of OS X - // 10.10.3). So print a warning if the limit is low. - var rlimit unix.Rlimit - if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlimit); err == nil { - const reasonableLimit = 4096 - - if rlimit.Cur < reasonableLimit { - log.Printf( - "Warning: low file rlimit of %d will cause cached content to be "+ - "frequently evicted. Consider raising with `ulimit -n`.", - rlimit.Cur) - } - } - // Choose UID and GID. 
uid, gid, err := perms.MyUserAndGroup() if err != nil { @@ -101,18 +83,15 @@ func mount( // Create a file system server. serverCfg := &fs.ServerConfig{ - Clock: timeutil.RealClock(), - Bucket: bucket, - TempDir: flags.TempDir, - TempDirLimitNumFiles: fs.ChooseTempDirLimitNumFiles(), - TempDirLimitBytes: flags.TempDirLimit, - GCSChunkSize: flags.GCSChunkSize, - ImplicitDirectories: flags.ImplicitDirs, - DirTypeCacheTTL: flags.TypeCacheTTL, - Uid: uid, - Gid: gid, - FilePerms: os.FileMode(flags.FileMode), - DirPerms: os.FileMode(flags.DirMode), + Clock: timeutil.RealClock(), + Bucket: bucket, + TempDir: flags.TempDir, + ImplicitDirectories: flags.ImplicitDirs, + DirTypeCacheTTL: flags.TypeCacheTTL, + Uid: uid, + Gid: gid, + FilePerms: os.FileMode(flags.FileMode), + DirPerms: os.FileMode(flags.DirMode), AppendThreshold: 1 << 21, // 2 MiB, a total guess. TmpObjectPrefix: ".gcsfuse_tmp/", diff --git a/mutable/content.go b/mutable/content.go deleted file mode 100644 index fc8cb6d41b..0000000000 --- a/mutable/content.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutable - -import ( - "fmt" - "math" - "time" - - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/jacobsa/timeutil" - "golang.org/x/net/context" -) - -// A mutable view on some content. Created with an initial read-only view, -// which then can be modified by the user and read back. 
Keeps track of which -// portion of the content has been dirtied. -// -// External synchronization is required. -type Content interface { - // Panic if any internal invariants are violated. - CheckInvariants() - - // Destroy any state used by the object, putting it into an indeterminate - // state. The object must not be used again. - Destroy() - - // If the content has been dirtied from its initial state, return a - // read/write lease for the current content. Otherwise return nil. - // - // If this method returns a non-nil read/write lease, the Content is - // implicitly destroyed and must not be used again. - Release() (rwl lease.ReadWriteLease) - - // Read part of the content, with semantics equivalent to io.ReaderAt aside - // from context support. - ReadAt(ctx context.Context, buf []byte, offset int64) (n int, err error) - - // Return information about the current state of the content. - Stat(ctx context.Context) (sr StatResult, err error) - - // Write into the content, with semantics equivalent to io.WriterAt aside from - // context support. - WriteAt(ctx context.Context, buf []byte, offset int64) (n int, err error) - - // Truncate our the content to the given number of bytes, extending if n is - // greater than the current size. - Truncate(ctx context.Context, n int64) (err error) -} - -type StatResult struct { - // The current size in bytes of the content. - Size int64 - - // It is guaranteed that all bytes in the range [0, DirtyThreshold) are - // unmodified from the original content with which the mutable content object - // was created. - DirtyThreshold int64 - - // The time at which the content was last updated, or nil if we've never - // changed it. - Mtime *time.Time -} - -// Create a mutable content object whose initial contents are given by the -// supplied read proxy. 
-func NewContent( - initialContent lease.ReadProxy, - clock timeutil.Clock) (mc Content) { - mc = &mutableContent{ - clock: clock, - initialContent: initialContent, - dirtyThreshold: initialContent.Size(), - } - - return -} - -type mutableContent struct { - ///////////////////////// - // Dependencies - ///////////////////////// - - clock timeutil.Clock - - ///////////////////////// - // Mutable state - ///////////////////////// - - destroyed bool - - // The initial contents with which this object was created, or nil if it has - // been dirtied. - // - // INVARIANT: When non-nil, initialContent.CheckInvariants() does not panic. - initialContent lease.ReadProxy - - // When dirty, a read/write lease containing our current contents. When - // clean, nil. - // - // INVARIANT: (initialContent == nil) != (readWriteLease == nil) - readWriteLease lease.ReadWriteLease - - // The lowest byte index that has been modified from the initial contents. - // - // INVARIANT: initialContent != nil => dirtyThreshold == initialContent.Size() - dirtyThreshold int64 - - // The time at which a method that modifies our contents was last called, or - // nil if never. - // - // INVARIANT: If dirty(), then mtime != nil - mtime *time.Time -} - -//////////////////////////////////////////////////////////////////////// -// Public interface -//////////////////////////////////////////////////////////////////////// - -func (mc *mutableContent) CheckInvariants() { - if mc.destroyed { - panic("Use of destroyed mutableContent object.") - } - - // INVARIANT: When non-nil, initialContent.CheckInvariants() does not panic. 
- if mc.initialContent != nil { - mc.initialContent.CheckInvariants() - } - - // INVARIANT: (initialContent == nil) != (readWriteLease == nil) - if mc.initialContent == nil && mc.readWriteLease == nil { - panic("Both initialContent and readWriteLease are nil") - } - - if mc.initialContent != nil && mc.readWriteLease != nil { - panic("Both initialContent and readWriteLease are non-nil") - } - - // INVARIANT: If dirty(), then mtime != nil - if mc.dirty() && mc.mtime == nil { - panic("Expected non-nil mtime.") - } - - // INVARIANT: initialContent != nil => dirtyThreshold == initialContent.Size() - if mc.initialContent != nil { - if mc.dirtyThreshold != mc.initialContent.Size() { - panic(fmt.Sprintf( - "Dirty threshold mismatch: %d vs. %d", - mc.dirtyThreshold, - mc.initialContent.Size())) - } - } -} - -func (mc *mutableContent) Destroy() { - mc.destroyed = true - - if mc.initialContent != nil { - mc.initialContent.Destroy() - mc.initialContent = nil - } - - if mc.readWriteLease != nil { - mc.readWriteLease.Downgrade().Revoke() - mc.readWriteLease = nil - } -} - -func (mc *mutableContent) Release() (rwl lease.ReadWriteLease) { - if !mc.dirty() { - return - } - - rwl = mc.readWriteLease - mc.readWriteLease = nil - mc.Destroy() - - return -} - -func (mc *mutableContent) ReadAt( - ctx context.Context, - buf []byte, - offset int64) (n int, err error) { - // Serve from the appropriate place. - if mc.dirty() { - n, err = mc.readWriteLease.ReadAt(buf, offset) - } else { - n, err = mc.initialContent.ReadAt(ctx, buf, offset) - } - - return -} - -func (mc *mutableContent) Stat( - ctx context.Context) (sr StatResult, err error) { - sr.DirtyThreshold = mc.dirtyThreshold - sr.Mtime = mc.mtime - - // Get the size from the appropriate place. 
- if mc.dirty() { - sr.Size, err = mc.readWriteLease.Size() - if err != nil { - return - } - } else { - sr.Size = mc.initialContent.Size() - } - - return -} - -func (mc *mutableContent) WriteAt( - ctx context.Context, - buf []byte, - offset int64) (n int, err error) { - // Make sure we have a read/write lease. - if err = mc.ensureReadWriteLease(ctx); err != nil { - err = fmt.Errorf("ensureReadWriteLease: %v", err) - return - } - - // Update our state regarding being dirty. - mc.dirtyThreshold = minInt64(mc.dirtyThreshold, offset) - - newMtime := mc.clock.Now() - mc.mtime = &newMtime - - // Call through. - n, err = mc.readWriteLease.WriteAt(buf, offset) - - return -} - -func (mc *mutableContent) Truncate( - ctx context.Context, - n int64) (err error) { - // Make sure we have a read/write lease. - if err = mc.ensureReadWriteLease(ctx); err != nil { - err = fmt.Errorf("ensureReadWriteLease: %v", err) - return - } - - // Convert to signed, which is what lease.ReadWriteLease wants. - if n > math.MaxInt64 { - err = fmt.Errorf("Illegal offset: %v", n) - return - } - - // Update our state regarding being dirty. - mc.dirtyThreshold = minInt64(mc.dirtyThreshold, n) - - newMtime := mc.clock.Now() - mc.mtime = &newMtime - - // Call through. - err = mc.readWriteLease.Truncate(int64(n)) - - return -} - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func minInt64(a int64, b int64) int64 { - if a < b { - return a - } - - return b -} - -func (mc *mutableContent) dirty() bool { - return mc.readWriteLease != nil -} - -// Ensure that mc.readWriteLease is non-nil with an authoritative view of mc's -// contents. -func (mc *mutableContent) ensureReadWriteLease( - ctx context.Context) (err error) { - // Is there anything to do? - if mc.readWriteLease != nil { - return - } - - // Set up the read/write lease. 
- rwl, err := mc.initialContent.Upgrade(ctx) - if err != nil { - err = fmt.Errorf("initialContent.Upgrade: %v", err) - return - } - - mc.readWriteLease = rwl - mc.initialContent = nil - - return -} diff --git a/mutable/content_test.go b/mutable/content_test.go deleted file mode 100644 index 2f67f3e5c3..0000000000 --- a/mutable/content_test.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright 2015 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mutable_test - -import ( - "errors" - "fmt" - "testing" - "time" - - "github.com/googlecloudplatform/gcsfuse/lease" - "github.com/googlecloudplatform/gcsfuse/lease/mock_lease" - "github.com/googlecloudplatform/gcsfuse/mutable" - . "github.com/jacobsa/oglematchers" - . "github.com/jacobsa/oglemock" - . "github.com/jacobsa/ogletest" - "github.com/jacobsa/timeutil" - "golang.org/x/net/context" -) - -func TestContent(t *testing.T) { RunTests(t) } - -//////////////////////////////////////////////////////////////////////// -// Helpers -//////////////////////////////////////////////////////////////////////// - -func bufferIs(buf []byte) Matcher { - return NewMatcher( - func(candidate interface{}) error { - p := candidate.([]byte) - - // Compare. - if &buf[0] != &p[0] { - return fmt.Errorf( - "Differing first bytes: %p vs. %p", - &buf[0], - &p[0]) - } - - if len(buf) != len(p) { - return fmt.Errorf( - "Differing lengths: %d vs. 
%d", - len(buf), - len(p)) - } - - return nil - }, - fmt.Sprintf("Buffer matches")) -} - -//////////////////////////////////////////////////////////////////////// -// Invariant-checking mutable content -//////////////////////////////////////////////////////////////////////// - -// A wrapper around a Content that calls CheckInvariants whenever invariants -// should hold. For catching logic errors early in the test. -type checkingContent struct { - ctx context.Context - wrapped mutable.Content -} - -func (mc *checkingContent) Stat() (mutable.StatResult, error) { - mc.wrapped.CheckInvariants() - defer mc.wrapped.CheckInvariants() - return mc.wrapped.Stat(mc.ctx) -} - -func (mc *checkingContent) ReadAt(b []byte, o int64) (int, error) { - mc.wrapped.CheckInvariants() - defer mc.wrapped.CheckInvariants() - return mc.wrapped.ReadAt(mc.ctx, b, o) -} - -func (mc *checkingContent) WriteAt(b []byte, o int64) (int, error) { - mc.wrapped.CheckInvariants() - defer mc.wrapped.CheckInvariants() - return mc.wrapped.WriteAt(mc.ctx, b, o) -} - -func (mc *checkingContent) Truncate(n int64) error { - mc.wrapped.CheckInvariants() - defer mc.wrapped.CheckInvariants() - return mc.wrapped.Truncate(mc.ctx, n) -} - -func (mc *checkingContent) Destroy() { - mc.wrapped.CheckInvariants() - mc.wrapped.Destroy() -} - -func (mc *checkingContent) Release() (rwl lease.ReadWriteLease) { - mc.wrapped.CheckInvariants() - return mc.wrapped.Release() -} - -//////////////////////////////////////////////////////////////////////// -// Boilerplate -//////////////////////////////////////////////////////////////////////// - -const initialContentSize = 11 - -type mutableContentTest struct { - ctx context.Context - - initialContent mock_lease.MockReadProxy - rwl mock_lease.MockReadWriteLease - clock timeutil.SimulatedClock - - mc checkingContent -} - -var _ SetUpInterface = &mutableContentTest{} - -func (t *mutableContentTest) SetUp(ti *TestInfo) { - t.ctx = ti.Ctx - - // Set up the mock initial contents, 
including a default size. - t.initialContent = mock_lease.NewMockReadProxy( - ti.MockController, - "initialContent") - - ExpectCall(t.initialContent, "Size")(). - WillRepeatedly(Return(initialContentSize)) - - // Set up a mock read/write lease. - t.rwl = mock_lease.NewMockReadWriteLease( - ti.MockController, - "rwl") - - // Ignore uninteresting calls. - ExpectCall(t.initialContent, "CheckInvariants")(). - WillRepeatedly(Return()) - - // Set up the clock. - t.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local)) - - // And the mutable content. - t.mc.ctx = ti.Ctx - t.mc.wrapped = mutable.NewContent( - t.initialContent, - &t.clock) -} - -//////////////////////////////////////////////////////////////////////// -// Clean state -//////////////////////////////////////////////////////////////////////// - -type CleanTest struct { - mutableContentTest -} - -func init() { RegisterTestSuite(&CleanTest{}) } - -func (t *CleanTest) ReadAt_CallsProxy() { - buf := make([]byte, 1) - - // Proxy - ExpectCall(t.initialContent, "ReadAt")(t.ctx, bufferIs(buf), 17). - WillOnce(Return(0, errors.New(""))) - - // Call - t.mc.ReadAt(buf, 17) -} - -func (t *CleanTest) ReadAt_ProxyFails() { - // Proxy - ExpectCall(t.initialContent, "ReadAt")(Any(), Any(), Any()). - WillOnce(Return(17, errors.New("taco"))) - - // Call - n, err := t.mc.ReadAt(make([]byte, 1), 0) - - ExpectEq(17, n) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *CleanTest) ReadAt_ProxySuceeds() { - // Proxy - ExpectCall(t.initialContent, "ReadAt")(Any(), Any(), Any()). - WillOnce(Return(17, nil)) - - // Call - n, err := t.mc.ReadAt(make([]byte, 1), 0) - - ExpectEq(17, n) - ExpectEq(nil, err) -} - -func (t *CleanTest) Stat() { - sr, err := t.mc.Stat() - - AssertEq(nil, err) - ExpectEq(initialContentSize, sr.Size) - ExpectEq(initialContentSize, sr.DirtyThreshold) - ExpectEq(nil, sr.Mtime) -} - -func (t *CleanTest) WriteAt_UpgradeFails() { - // Upgrade - ExpectCall(t.initialContent, "Upgrade")(Any()). 
- WillOnce(Return(nil, errors.New("taco"))) - - // Call - _, err := t.mc.WriteAt(make([]byte, 1), 0) - - ExpectThat(err, Error(HasSubstr("Upgrade"))) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *CleanTest) WriteAt_UpgradeSucceeds() { - // Upgrade -- succeed. - ExpectCall(t.initialContent, "Upgrade")(Any()). - WillOnce(Return(t.rwl, nil)) - - // The read/write lease should be called. - ExpectCall(t.rwl, "WriteAt")(Any(), 17). - WillOnce(Return(0, errors.New(""))) - - // Call. - t.mc.WriteAt(make([]byte, 1), 17) - - // A further call should go right through to the read/write lease again. - ExpectCall(t.rwl, "WriteAt")(Any(), 19). - WillOnce(Return(0, errors.New(""))) - - t.mc.WriteAt(make([]byte, 1), 19) -} - -func (t *CleanTest) Truncate_UpgradeFails() { - // Upgrade - ExpectCall(t.initialContent, "Upgrade")(Any()). - WillOnce(Return(nil, errors.New("taco"))) - - // Call - err := t.mc.Truncate(0) - - ExpectThat(err, Error(HasSubstr("Upgrade"))) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *CleanTest) Truncate_UpgradeSucceeds() { - // Upgrade -- succeed. - ExpectCall(t.initialContent, "Upgrade")(Any()). - WillOnce(Return(t.rwl, nil)) - - // The read/write lease should be called. - ExpectCall(t.rwl, "Truncate")(17). - WillOnce(Return(errors.New(""))) - - // Call. - t.mc.Truncate(17) - - // A further call should go right through to the read/write lease again. - ExpectCall(t.rwl, "Truncate")(19). 
- WillOnce(Return(errors.New(""))) - - t.mc.Truncate(19) -} - -func (t *CleanTest) Release() { - rwl := t.mc.Release() - ExpectEq(nil, rwl) -} - -//////////////////////////////////////////////////////////////////////// -// Dirty state -//////////////////////////////////////////////////////////////////////// - -type DirtyTest struct { - mutableContentTest - - setUpTime time.Time -} - -func init() { RegisterTestSuite(&DirtyTest{}) } - -func (t *DirtyTest) SetUp(ti *TestInfo) { - t.mutableContentTest.SetUp(ti) - t.setUpTime = t.clock.Now() - - // Simulate a successful upgrade. - ExpectCall(t.initialContent, "Upgrade")(Any()). - WillOnce(Return(t.rwl, nil)) - - ExpectCall(t.rwl, "Truncate")(Any()). - WillOnce(Return(nil)) - - err := t.mc.Truncate(initialContentSize) - AssertEq(nil, err) - - // Change the time. - t.clock.AdvanceTime(time.Second) -} - -func (t *DirtyTest) ReadAt_CallsLease() { - buf := make([]byte, 4) - const offset = 17 - - // Lease - ExpectCall(t.rwl, "ReadAt")(bufferIs(buf), offset). - WillOnce(Return(0, errors.New(""))) - - // Call - t.mc.ReadAt(buf, offset) -} - -func (t *DirtyTest) ReadAt_LeaseFails() { - // Lease - ExpectCall(t.rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(13, errors.New("taco"))) - - // Call - n, err := t.mc.ReadAt([]byte{}, 0) - - ExpectEq(13, n) - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *DirtyTest) ReadAt_LeaseSuceeds() { - // Lease - ExpectCall(t.rwl, "ReadAt")(Any(), Any()). - WillOnce(Return(13, nil)) - - // Call - n, err := t.mc.ReadAt([]byte{}, 0) - - ExpectEq(13, n) - ExpectEq(nil, err) -} - -func (t *DirtyTest) Stat_LeaseFails() { - // Lease - ExpectCall(t.rwl, "Size")(). - WillOnce(Return(0, errors.New("taco"))) - - // Call - _, err := t.mc.Stat() - ExpectThat(err, Error(HasSubstr("taco"))) -} - -func (t *DirtyTest) Stat_LeaseSucceeds() { - // Lease - ExpectCall(t.rwl, "Size")(). - WillOnce(Return(17, nil)) - - // Call - sr, err := t.mc.Stat() - AssertEq(nil, err) - - // Check the initial state. 
- ExpectEq(17, sr.Size) - ExpectEq(initialContentSize, sr.DirtyThreshold) - ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.setUpTime))) -} - -func (t *DirtyTest) WriteAt_CallsLease() { - buf := make([]byte, 4) - const offset = 17 - - // Lease - ExpectCall(t.rwl, "WriteAt")(bufferIs(buf), offset). - WillOnce(Return(0, errors.New(""))) - - // Call - t.mc.WriteAt(buf, offset) -} - -func (t *DirtyTest) WriteAt_LeaseFails() { - const offset = initialContentSize - 2 - - // Lease - ExpectCall(t.rwl, "WriteAt")(Any(), Any()). - WillOnce(Return(13, errors.New("taco"))) - - // Call - n, err := t.mc.WriteAt([]byte{}, offset) - - ExpectEq(13, n) - ExpectThat(err, Error(HasSubstr("taco"))) - - // The dirty threshold and mtime should have been updated. - ExpectCall(t.rwl, "Size")(). - WillRepeatedly(Return(initialContentSize, nil)) - - sr, err := t.mc.Stat() - AssertEq(nil, err) - ExpectEq(offset, sr.DirtyThreshold) - ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) -} - -func (t *DirtyTest) WriteAt_LeaseSucceeds() { - const offset = initialContentSize - 2 - - // Lease - ExpectCall(t.rwl, "WriteAt")(Any(), Any()). - WillOnce(Return(13, nil)) - - // Call - n, err := t.mc.WriteAt([]byte{}, offset) - - ExpectEq(13, n) - ExpectEq(nil, err) - - // The dirty threshold and mtime should have been updated. - ExpectCall(t.rwl, "Size")(). - WillRepeatedly(Return(initialContentSize, nil)) - - sr, err := t.mc.Stat() - AssertEq(nil, err) - ExpectEq(offset, sr.DirtyThreshold) - ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) -} - -func (t *DirtyTest) WriteAt_DirtyThreshold() { - var sr mutable.StatResult - var err error - - // Simulate successful writes and size requests. - ExpectCall(t.rwl, "WriteAt")(Any(), Any()). - WillRepeatedly(Return(0, nil)) - - ExpectCall(t.rwl, "Size")(). - WillRepeatedly(Return(100, nil)) - - // Writing at the end of the initial content should not affect the dirty - // threshold. 
- _, err = t.mc.WriteAt([]byte{}, initialContentSize) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize, sr.DirtyThreshold) - - // Nor should writing past the end. - _, err = t.mc.WriteAt([]byte{}, initialContentSize+100) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize, sr.DirtyThreshold) - - // But writing before the end should. - _, err = t.mc.WriteAt([]byte{}, initialContentSize-1) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize-1, sr.DirtyThreshold) -} - -func (t *DirtyTest) Truncate_CallsLease() { - // Lease - ExpectCall(t.rwl, "Truncate")(17). - WillOnce(Return(errors.New(""))) - - // Call - t.mc.Truncate(17) -} - -func (t *DirtyTest) Truncate_LeaseFails() { - // Lease - ExpectCall(t.rwl, "Truncate")(Any()). - WillOnce(Return(errors.New("taco"))) - - // Call - err := t.mc.Truncate(1) - ExpectThat(err, Error(HasSubstr("taco"))) - - // The dirty threshold and mtime should have been updated. - ExpectCall(t.rwl, "Size")(). - WillRepeatedly(Return(0, nil)) - - sr, err := t.mc.Stat() - AssertEq(nil, err) - ExpectEq(1, sr.DirtyThreshold) - ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) -} - -func (t *DirtyTest) Truncate_LeaseSucceeds() { - // Lease - ExpectCall(t.rwl, "Truncate")(Any()). - WillOnce(Return(nil)) - - // Call - err := t.mc.Truncate(1) - ExpectEq(nil, err) - - // The dirty threshold and mtime should have been updated. - ExpectCall(t.rwl, "Size")(). - WillRepeatedly(Return(0, nil)) - - sr, err := t.mc.Stat() - AssertEq(nil, err) - ExpectEq(1, sr.DirtyThreshold) - ExpectThat(sr.Mtime, Pointee(timeutil.TimeEq(t.clock.Now()))) -} - -func (t *DirtyTest) Truncate_DirtyThreshold() { - var sr mutable.StatResult - var err error - - // Simulate successful truncations and size requests. - ExpectCall(t.rwl, "Truncate")(Any()). - WillRepeatedly(Return(nil)) - - ExpectCall(t.rwl, "Size")(). 
- WillRepeatedly(Return(100, nil)) - - // Truncating to the same size should not affect the dirty threshold. - err = t.mc.Truncate(initialContentSize) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize, sr.DirtyThreshold) - - // Nor should truncating upward. - err = t.mc.Truncate(initialContentSize + 100) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize, sr.DirtyThreshold) - - // But truncating downward should. - err = t.mc.Truncate(initialContentSize - 1) - AssertEq(nil, err) - - sr, err = t.mc.Stat() - AssertEq(nil, err) - ExpectEq(initialContentSize-1, sr.DirtyThreshold) -} - -func (t *DirtyTest) Release() { - rwl := t.mc.Release() - ExpectEq(t.rwl, rwl) -} diff --git a/mutable/mock/mock_content.go b/mutable/mock/mock_content.go deleted file mode 100644 index 448870a5d1..0000000000 --- a/mutable/mock/mock_content.go +++ /dev/null @@ -1,217 +0,0 @@ -// This file was auto-generated using createmock. See the following page for -// more information: -// -// https://github.com/jacobsa/oglemock -// - -package mock_mutable - -import ( - fmt "fmt" - lease "github.com/googlecloudplatform/gcsfuse/lease" - mutable "github.com/googlecloudplatform/gcsfuse/mutable" - oglemock "github.com/jacobsa/oglemock" - context "golang.org/x/net/context" - runtime "runtime" - unsafe "unsafe" -) - -type MockContent interface { - mutable.Content - oglemock.MockObject -} - -type mockContent struct { - controller oglemock.Controller - description string -} - -func NewMockContent( - c oglemock.Controller, - desc string) MockContent { - return &mockContent{ - controller: c, - description: desc, - } -} - -func (m *mockContent) Oglemock_Id() uintptr { - return uintptr(unsafe.Pointer(m)) -} - -func (m *mockContent) Oglemock_Description() string { - return m.description -} - -func (m *mockContent) CheckInvariants() { - // Get a file name and line number for the caller. 
- _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "CheckInvariants", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockContent.CheckInvariants: invalid return values: %v", retVals)) - } - - return -} - -func (m *mockContent) Destroy() { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Destroy", - file, - line, - []interface{}{}) - - if len(retVals) != 0 { - panic(fmt.Sprintf("mockContent.Destroy: invalid return values: %v", retVals)) - } - - return -} - -func (m *mockContent) ReadAt(p0 context.Context, p1 []uint8, p2 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "ReadAt", - file, - line, - []interface{}{p0, p1, p2}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockContent.ReadAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockContent) Release() (o0 lease.ReadWriteLease) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "Release", - file, - line, - []interface{}{}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockContent.Release: invalid return values: %v", retVals)) - } - - // o0 lease.ReadWriteLease - if retVals[0] != nil { - o0 = retVals[0].(lease.ReadWriteLease) - } - - return -} - -func (m *mockContent) Stat(p0 context.Context) (o0 mutable.StatResult, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Stat", - file, - line, - []interface{}{p0}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockContent.Stat: invalid return values: %v", retVals)) - } - - // o0 mutable.StatResult - if retVals[0] != nil { - o0 = retVals[0].(mutable.StatResult) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -} - -func (m *mockContent) Truncate(p0 context.Context, p1 int64) (o0 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. - retVals := m.controller.HandleMethodCall( - m, - "Truncate", - file, - line, - []interface{}{p0, p1}) - - if len(retVals) != 1 { - panic(fmt.Sprintf("mockContent.Truncate: invalid return values: %v", retVals)) - } - - // o0 error - if retVals[0] != nil { - o0 = retVals[0].(error) - } - - return -} - -func (m *mockContent) WriteAt(p0 context.Context, p1 []uint8, p2 int64) (o0 int, o1 error) { - // Get a file name and line number for the caller. - _, file, line, _ := runtime.Caller(1) - - // Hand the call off to the controller, which does most of the work. 
- retVals := m.controller.HandleMethodCall( - m, - "WriteAt", - file, - line, - []interface{}{p0, p1, p2}) - - if len(retVals) != 2 { - panic(fmt.Sprintf("mockContent.WriteAt: invalid return values: %v", retVals)) - } - - // o0 int - if retVals[0] != nil { - o0 = retVals[0].(int) - } - - // o1 error - if retVals[1] != nil { - o1 = retVals[1].(error) - } - - return -}