From 0ebd6c3e1e01b60ea5126141bbbdb7051fefd55b Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 29 Jun 2017 07:15:41 +0200 Subject: [PATCH 01/46] lowered the default values --- chunk/storage.go | 4 +--- main.go | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/chunk/storage.go b/chunk/storage.go index a91e56a..18624e9 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -85,7 +85,7 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b res := make(chan []byte) go func() { - for { + for _ = range time.Tick(time.Millisecond) { s.tocLock.Lock() _, exists := s.toc[id] s.tocLock.Unlock() @@ -102,8 +102,6 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b return } } - - time.Sleep(10 * time.Millisecond) } }() diff --git a/main.go b/main.go index 31c952e..e5102b8 100644 --- a/main.go +++ b/main.go @@ -45,8 +45,8 @@ func main() { argMongoDatabase := flag.String("mongo-database", "plexdrive", "MongoDB database") argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") - argChunkLoadAhead := flag.Int("chunk-load-ahead", (runtime.NumCPU()/2)-1, "The number of chunks that should be read ahead") - argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 30*time.Second, "Duration to wait for a chunk to be loaded") + argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") + argChunkLoadTimeout := flag.Duration("chunk-load-timeout", time.Minute, "Duration to wait for a chunk to be loaded") argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") argRefreshInterval := flag.Duration("refresh-interval", 5*time.Minute, "The time to wait till checking for changes") From d9c642fcb30cd451f12ae3c51f96c34fd764bf5d Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 29 Jun 2017 19:53:29 +0200 Subject: [PATCH 02/46] changed defaults --- chunk/manager.go | 4 ++-- main.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index 5ab9650..b931bf1 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -69,8 +69,8 @@ func NewManager( Timeout: timeout, TimeoutRetries: timeoutRetries, downloader: downloader, - queue: make(chan *Request, threads*100), - preloadQueue: make(chan *Request, threads*loadAhead*100), + queue: make(chan *Request, 20), + preloadQueue: make(chan *Request, loadAhead*20), storage: NewStorage(chunkPath, chunkSize, maxChunks), } diff --git a/main.go b/main.go index e5102b8..be4c995 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ func main() { argMongoUser := flag.String("mongo-user", "", "MongoDB username") argMongoPass := flag.String("mongo-password", "", "MongoDB password") argMongoDatabase := flag.String("mongo-database", "plexdrive", "MongoDB database") - argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") + argChunkSize := flag.String("chunk-size", "2M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks 
that should be read ahead") argChunkLoadTimeout := flag.Duration("chunk-load-timeout", time.Minute, "Duration to wait for a chunk to be loaded") From c233a3f7ff3457d14c64bf4d00adc7184dfe85ee Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 29 Jun 2017 20:38:15 +0200 Subject: [PATCH 03/46] changed defaults --- chunk/manager.go | 4 ++-- chunk/storage.go | 6 +++++- main.go | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index b931bf1..b4b6fc3 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -69,8 +69,8 @@ func NewManager( Timeout: timeout, TimeoutRetries: timeoutRetries, downloader: downloader, - queue: make(chan *Request, 20), - preloadQueue: make(chan *Request, loadAhead*20), + queue: make(chan *Request, 100), + preloadQueue: make(chan *Request, 100), storage: NewStorage(chunkPath, chunkSize, maxChunks), } diff --git a/chunk/storage.go b/chunk/storage.go index 18624e9..f889332 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -85,7 +85,7 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b res := make(chan []byte) go func() { - for _ = range time.Tick(time.Millisecond) { + for { s.tocLock.Lock() _, exists := s.toc[id] s.tocLock.Unlock() @@ -93,15 +93,19 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b bytes, exists := s.loadFromRAM(id, offset, size) if exists { res <- bytes + close(res) return } bytes, exists = s.loadFromDisk(id, offset, size) if exists { res <- bytes + close(res) return } } + + time.Sleep(10 * time.Millisecond) } }() diff --git a/main.go b/main.go index be4c995..c2ad641 100644 --- a/main.go +++ b/main.go @@ -43,10 +43,10 @@ func main() { argMongoUser := flag.String("mongo-user", "", "MongoDB username") argMongoPass := flag.String("mongo-password", "", "MongoDB password") argMongoDatabase := flag.String("mongo-database", "plexdrive", "MongoDB database") - argChunkSize := flag.String("chunk-size", "2M", "The size of each chunk that is downloaded (units: B, K, M, G)") + argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") - argChunkLoadTimeout := flag.Duration("chunk-load-timeout", time.Minute, "Duration to wait for a chunk to be loaded") + argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 10*time.Second, "Duration to wait for a chunk to be loaded") argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") argRefreshInterval := flag.Duration("refresh-interval", 5*time.Minute, "The time to wait till checking for changes") From d858db59d632884d88bd531e368842c7f103f7a7 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 07:08:18 +0200 Subject: [PATCH 04/46] download error detection --- chunk/manager.go | 1 + chunk/storage.go | 34 +++++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index b4b6fc3..a218a8c 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -147,6 +147,7 @@ func (m *Manager) checkChunk(req *Request) { bytes, err := m.downloader.Download(req) if nil != err { Log.Warningf("%v", err) + 
m.storage.Error(req.id, err) } if err := m.storage.Store(req.id, bytes); nil != err { diff --git a/chunk/storage.go b/chunk/storage.go index f889332..51da437 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -24,7 +24,7 @@ type Storage struct { queue chan *Item chunks map[string][]byte chunksLock sync.Mutex - toc map[string]bool + toc map[string]error tocLock sync.Mutex stack *Stack } @@ -41,7 +41,7 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { MaxChunks: maxChunks, queue: make(chan *Item, 100), chunks: make(map[string][]byte), - toc: make(map[string]bool), + toc: make(map[string]error), stack: NewStack(), } @@ -63,7 +63,7 @@ func (s *Storage) ExistsOrCreate(id string) bool { s.tocLock.Unlock() return true } - s.toc[id] = true + s.toc[id] = nil s.tocLock.Unlock() return false } @@ -81,18 +81,26 @@ func (s *Storage) Store(id string, bytes []byte) error { return nil } +func (s *Storage) Error(id string, err error) { + s.tocLock.Lock() + s.toc[id] = err + s.tocLock.Unlock() +} + func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { res := make(chan []byte) + ec := make(chan error) go func() { for { s.tocLock.Lock() - _, exists := s.toc[id] + err, exists := s.toc[id] s.tocLock.Unlock() - if exists { + if nil == err && exists { bytes, exists := s.loadFromRAM(id, offset, size) if exists { res <- bytes + close(ec) close(res) return } @@ -100,9 +108,15 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b bytes, exists = s.loadFromDisk(id, offset, size) if exists { res <- bytes + close(ec) close(res) return } + } else if nil != err { + ec <- err + close(ec) + close(res) + return } time.Sleep(10 * time.Millisecond) @@ -112,7 +126,11 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b select { case r := <-res: return r, nil + case err := <-ec: + s.deleteFromToc(id) + return nil, err case <-time.After(timeout): + s.deleteFromToc(id) return nil, TIMEOUT } } @@ -126,6 +144,12 @@ func (s *Storage) thread() { } } +func (s *Storage) deleteFromToc(id string) { + s.tocLock.Lock() + delete(s.toc, id) + s.tocLock.Unlock() +} + func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { s.chunksLock.Lock() bytes, exists := s.chunks[id] From 08fbe9d678c0d7f0760b555b6260d6a5cd431505 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 09:39:49 +0200 Subject: [PATCH 05/46] retry on 500 error --- chunk/download.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index b0b9298..a090305 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -55,7 +55,7 @@ func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte reader := res.Body if res.StatusCode != 206 { - if res.StatusCode != 403 { + if res.StatusCode != 403 && res.StatusCode != 500 { Log.Debugf("Request\n----------\n%v\n----------\n", req) Log.Debugf("Response\n----------\n%v\n----------\n", res) return nil, fmt.Errorf("Wrong status code %v", res.StatusCode) @@ -68,7 +68,7 @@ func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte bytes, err := ioutil.ReadAll(reader) if nil != err { Log.Debugf("%v", err) - return nil, fmt.Errorf("Could not read body of 403 error") + return nil, fmt.Errorf("Could not read body of error") } body := string(bytes) if strings.Contains(body, "dailyLimitExceeded") || @@ -83,7 +83,7 @@ func downloadFromAPI(client *http.Client, request *Request, 
delay int64) ([]byte return downloadFromAPI(client, request, delay) } - // return an error if other 403 error occurred + // return an error if other error occurred Log.Debugf("%v", body) return nil, fmt.Errorf("Could not read object %v (%v) / StatusCode: %v", request.object.ObjectID, request.object.Name, res.StatusCode) From f7165d201529fcc9a232f4c35ae620a6599ceb20 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:03:01 +0200 Subject: [PATCH 06/46] merge --- drive/cache.go | 271 ++++++++++++++++++++++++++++++++----------------- drive/drive.go | 9 +- main.go | 20 +--- 3 files changed, 184 insertions(+), 116 deletions(-) diff --git a/drive/cache.go b/drive/cache.go index 87b718f..968e89a 100644 --- a/drive/cache.go +++ b/drive/cache.go @@ -1,6 +1,7 @@ package drive import ( + "bytes" "encoding/json" "fmt" "io/ioutil" @@ -11,35 +12,24 @@ import ( . "github.com/claudetech/loggo/default" "golang.org/x/oauth2" - "io" - - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" + "github.com/boltdb/bolt" ) // Cache is the cache type Cache struct { - session *mgo.Session - dbName string + db *bolt.DB tokenPath string } -const ( - // StoreAction stores an object in cache - StoreAction = iota - // DeleteAction deletes an object in cache - DeleteAction = iota +var ( + bObjects = []byte("api_objects") + bParents = []byte("idx_api_objects_py_parent") + bPageToken = []byte("page_token") ) -type cacheAction struct { - action int - object *APIObject - instant bool -} - // APIObject is a Google Drive file object type APIObject struct { - ObjectID string `bson:"_id,omitempty"` + ObjectID string Name string IsDir bool Size uint64 @@ -51,46 +41,46 @@ type APIObject struct { // PageToken is the last change id type PageToken struct { - ID string `bson:"_id,omitempty"` + ID string Token string } // NewCache creates a new cache instance -func NewCache(mongoURL, mongoUser, mongoPass, mongoDatabase, cacheBasePath string, sqlDebug bool) (*Cache, error) { +func NewCache(cacheBasePath string, sqlDebug bool) (*Cache, error) { Log.Debugf("Opening cache connection") - session, err := mgo.Dial(mongoURL) + db, err := bolt.Open(filepath.Join(cacheBasePath, "cache.bolt"), 0600, nil) if nil != err { - Log.Debugf("%v") - return nil, fmt.Errorf("Could not open mongo db connection") + Log.Debugf("%v", err) + return nil, fmt.Errorf("Could not open cache file") } cache := Cache{ - session: session, - dbName: mongoDatabase, + db: db, tokenPath: filepath.Join(cacheBasePath, "token.json"), } - // getting the db - db := session.DB(mongoDatabase) - - // login - if "" != mongoUser && "" != mongoPass { - db.Login(mongoUser, mongoPass) - } - - // create index - col := db.C("api_objects") - col.EnsureIndex(mgo.Index{Key: []string{"parents"}}) - col.EnsureIndex(mgo.Index{Key: []string{"name"}}) + // Make sure the necessary buckets exist + err = db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(bObjects); nil != err { + return err + } + if _, err := tx.CreateBucketIfNotExists(bParents); nil != err { + return err + } + if _, err := tx.CreateBucketIfNotExists(bPageToken); nil != err { + return err + } + return nil + }) - return &cache, nil + return &cache, err } // Close closes all handles func (c *Cache) Close() error { - Log.Debugf("Closing cache connection") - c.session.Close() + Log.Debugf("Closing cache file") + c.db.Close() return nil } @@ -131,68 +121,98 @@ func (c *Cache) StoreToken(token *oauth2.Token) error { } // GetObject gets an object by id -func (c *Cache) GetObject(id string) (*APIObject, 
error) { +func (c *Cache) GetObject(id string) (object *APIObject, err error) { Log.Tracef("Getting object %v", id) - db := c.session.DB(c.dbName).C("api_objects") - var object APIObject - if err := db.Find(bson.M{"_id": id}).One(&object); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.GetObject(id) - } - return nil, fmt.Errorf("Could not find object %v in cache", id) + c.db.View(func(tx *bolt.Tx) error { + object, err = boltGetObject(tx, id) + return nil + }) + if nil != err { + return nil, err } Log.Tracef("Got object from cache %v", object) - return &object, nil + return object, err } // GetObjectsByParent get all objects under parent id func (c *Cache) GetObjectsByParent(parent string) ([]*APIObject, error) { Log.Tracef("Getting children for %v", parent) - db := c.session.DB(c.dbName).C("api_objects") - var objects []*APIObject - if err := db.Find(bson.M{"parents": parent}).All(&objects); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.GetObjectsByParent(parent) + objects := make([]*APIObject, 0) + c.db.View(func(tx *bolt.Tx) error { + cr := tx.Bucket(bParents).Cursor() + + // Iterate over all object ids stored under the parent in the index + objectIds := make([]string, 0) + prefix := []byte(parent + "/") + for k, v := cr.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = cr.Next() { + objectIds = append(objectIds, string(v)) } - return nil, fmt.Errorf("Could not find children for parent %v in cache", parent) - } + + // Fetch all objects for the given ids + for _, id := range objectIds { + if object, err := boltGetObject(tx, id); nil == err { + objects = append(objects, object) + } + } + return nil + }) Log.Tracef("Got objects from cache %v", objects) return objects, nil } // GetObjectByParentAndName finds a child element by name and its parent id -func (c *Cache) GetObjectByParentAndName(parent, name string) (*APIObject, error) { +func (c *Cache) GetObjectByParentAndName(parent, name string) (object *APIObject, err error) { Log.Tracef("Getting object %v in parent %v", name, parent) - db := c.session.DB(c.dbName).C("api_objects") - var object APIObject - if err := db.Find(bson.M{"parents": parent, "name": name}).One(&object); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.GetObjectByParentAndName(parent, name) + c.db.View(func(tx *bolt.Tx) error { + // Look up object id in parent-name index + b := tx.Bucket(bParents) + v := b.Get([]byte(parent + "/" + name)) + if nil == v { + return nil } + + // Fetch object for given id + object, err = boltGetObject(tx, string(v)) + return nil + }) + if nil != err { + return nil, err + } + + if object == nil { return nil, fmt.Errorf("Could not find object with name %v in parent %v", name, parent) } Log.Tracef("Got object from cache %v", object) - return &object, nil + return object, nil } // DeleteObject deletes an object by id func (c *Cache) DeleteObject(id string) error { - db := c.session.DB(c.dbName).C("api_objects") + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bObjects) + object, _ := boltGetObject(tx, id) + if nil == object { + return nil + } + + b.Delete([]byte(id)) - if err := db.Remove(bson.M{"_id": id}); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.DeleteObject(id) + // Remove object ids from the index + b = tx.Bucket(bParents) + for _, parent := range object.Parents { + b.Delete([]byte(parent + "/" + object.Name)) } + + return nil + }) + if nil != err { + Log.Debugf("%v", err) return fmt.Errorf("Could not delete object %v", id) } 
@@ -201,29 +221,91 @@ func (c *Cache) DeleteObject(id string) error { // UpdateObject updates an object func (c *Cache) UpdateObject(object *APIObject) error { - db := c.session.DB(c.dbName).C("api_objects") + err := c.db.Update(func(tx *bolt.Tx) error { + return boltUpdateObject(tx, object) + }) - if _, err := db.Upsert(bson.M{"_id": object.ObjectID}, object); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.UpdateObject(object) - } + if nil != err { + Log.Debugf("%v", err) return fmt.Errorf("Could not update/save object %v (%v)", object.ObjectID, object.Name) } return nil } +func boltStoreObject(tx *bolt.Tx, object *APIObject) error { + b := tx.Bucket(bObjects) + v, err := json.Marshal(object) + if nil != err { + return err + } + return b.Put([]byte(object.ObjectID), v) +} + +func boltGetObject(tx *bolt.Tx, id string) (*APIObject, error) { + b := tx.Bucket(bObjects) + v := b.Get([]byte(id)) + if v == nil { + return nil, fmt.Errorf("Could not find object %v in cache", id) + } + + var object APIObject + err := json.Unmarshal(v, &object) + return &object, err +} + +func boltUpdateObject(tx *bolt.Tx, object *APIObject) error { + prev, _ := boltGetObject(tx, object.ObjectID) + if nil != prev { + // Remove object ids from the index + b := tx.Bucket(bParents) + for _, parent := range object.Parents { + b.Delete([]byte(parent + "/" + object.Name)) + } + } + + if err := boltStoreObject(tx, object); nil != err { + return err + } + + // Store the object id by parent-name in the index + b := tx.Bucket(bParents) + for _, parent := range object.Parents { + if err := b.Put([]byte(parent+"/"+object.Name), []byte(object.ObjectID)); nil != err { + return err + } + } + return nil +} + +func (c *Cache) BatchUpdateObjects(objects []*APIObject) error { + err := c.db.Update(func(tx *bolt.Tx) error { + for _, object := range objects { + if err := boltUpdateObject(tx, object); nil != err { + return err + } + } + return nil + }) + + if nil != err { + Log.Debugf("%v", err) + return fmt.Errorf("Could not update/save objects: %v", err) + } + + return nil +} + // StoreStartPageToken stores the page token for changes func (c *Cache) StoreStartPageToken(token string) error { Log.Debugf("Storing page token %v in cache", token) - db := c.session.DB(c.dbName).C("page_token") + err := c.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bPageToken) + return b.Put([]byte("t"), []byte(token)) + }) - if _, err := db.Upsert(bson.M{"_id": "t"}, &PageToken{ID: "t", Token: token}); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.StoreStartPageToken(token) - } + if nil != err { + Log.Debugf("%v", err) return fmt.Errorf("Could not store token %v", token) } @@ -232,18 +314,19 @@ func (c *Cache) StoreStartPageToken(token string) error { // GetStartPageToken gets the start page token func (c *Cache) GetStartPageToken() (string, error) { - Log.Debugf("Getting start page token from cache") - db := c.session.DB(c.dbName).C("page_token") + var pageToken string - var pageToken PageToken - if err := db.Find(nil).One(&pageToken); nil != err { - if io.EOF == err { - c.session.Refresh() - return c.GetStartPageToken() - } - return "", fmt.Errorf("Could not get token from cache") + Log.Debugf("Getting start page token from cache") + c.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bPageToken) + v := b.Get([]byte("t")) + pageToken = string(v) + return nil + }) + if pageToken == "" { + return "", fmt.Errorf("Could not get token from cache, token is empty") } - Log.Tracef("Got start page token %v", 
pageToken.Token) - return pageToken.Token, nil + Log.Tracef("Got start page token %v", pageToken) + return pageToken, nil } diff --git a/drive/drive.go b/drive/drive.go index 01abab4..5b22e40 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -114,6 +114,7 @@ func (d *Client) checkChanges(firstCheck bool) { break } + objects := make([]*APIObject, 0) for _, change := range results.Changes { Log.Tracef("Change %v", change) @@ -128,15 +129,17 @@ func (d *Client) checkChanges(firstCheck bool) { Log.Debugf("%v", err) Log.Warningf("Could not map Google Drive file %v (%v) to object", change.File.Id, change.File.Name) } else { - if err := d.cache.UpdateObject(object); nil != err { - Log.Warningf("%v", err) - } + objects = append(objects, object) updatedItems++ } } processedItems++ } + if err := d.cache.BatchUpdateObjects(objects); nil != err { + Log.Warningf("%v", err) + return + } if processedItems > 0 { Log.Infof("Processed %v items / deleted %v items / updated %v items", diff --git a/main.go b/main.go index c2ad641..1a0c076 100644 --- a/main.go +++ b/main.go @@ -39,10 +39,6 @@ func main() { argRootNodeID := flag.String("root-node-id", "root", "The ID of the root node to mount (use this for only mount a sub directory)") argConfigPath := flag.StringP("config", "c", filepath.Join(user.HomeDir, ".plexdrive"), "The path to the configuration directory") argTempPath := flag.StringP("temp", "t", os.TempDir(), "Path to a temporary directory to store temporary data") - argMongoURL := flag.StringP("mongo-host", "m", "localhost", "MongoDB host") - argMongoUser := flag.String("mongo-user", "", "MongoDB username") - argMongoPass := flag.String("mongo-password", "", "MongoDB password") - argMongoDatabase := flag.String("mongo-database", "plexdrive", "MongoDB database") argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") @@ -71,16 +67,6 @@ func main() { fmt.Println() panic(fmt.Errorf("Mountpoint not specified")) } - if "" == *argMongoURL { - flag.Usage() - fmt.Println() - panic(fmt.Errorf("MongoDB URL not specified")) - } - if "" == *argMongoDatabase { - flag.Usage() - fmt.Println() - panic(fmt.Errorf("MongoDB database not specified")) - } // calculate uid / gid uid := uint32(unix.Geteuid()) @@ -124,10 +110,6 @@ func main() { Log.Debugf("root-node-id : %v", *argRootNodeID) Log.Debugf("config : %v", *argConfigPath) Log.Debugf("temp : %v", *argTempPath) - Log.Debugf("mongo-host : %v", *argMongoURL) - Log.Debugf("mongo-user : %v", *argMongoUser) - Log.Debugf("mongo-password : %v", *argMongoPass) - Log.Debugf("mongo-database : %v", *argMongoDatabase) Log.Debugf("chunk-size : %v", *argChunkSize) Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) @@ -169,7 +151,7 @@ func main() { } } - cache, err := drive.NewCache(*argMongoURL, *argMongoUser, *argMongoPass, *argMongoDatabase, *argConfigPath, *argLogLevel > 3) + cache, err := drive.NewCache(*argConfigPath, *argLogLevel > 3) if nil != err { Log.Errorf("%v", err) os.Exit(4) From 612475b7ede93d969105498db4efbdb43c35a31d Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:09:14 +0200 Subject: [PATCH 07/46] workaround for renaming objects --- drive/drive.go | 5 +++++ 1 file changed, 5 insertions(+) 
diff --git a/drive/drive.go b/drive/drive.go index 5b22e40..41bb67f 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -344,6 +344,11 @@ func (d *Client) Rename(object *APIObject, OldParent string, NewParent string, N } object.Parents = append(object.Parents, NewParent) + if err := d.cache.DeleteObject(object.ObjectID); nil != err { + Log.Debugf("%v", err) + return fmt.Errorf("Could not delete object %v (%v) from cache", object.ObjectID, object.Name) + } + if err := d.cache.UpdateObject(object); nil != err { Log.Debugf("%v", err) return fmt.Errorf("Could not rename object %v (%v) from cache", object.ObjectID, object.Name) From 749d13af763d6e27df43ab14a643288031fc6713 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:11:17 +0200 Subject: [PATCH 08/46] build nightlies from develop --- ci/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/pipeline.yml b/ci/pipeline.yml index cdf17aa..49cf695 100644 --- a/ci/pipeline.yml +++ b/ci/pipeline.yml @@ -10,7 +10,7 @@ resources: type: git source: uri: https://github.com/dweidenfeld/plexdrive - branch: master + branch: develop - name: github-nightly-release type: github-release From 146f1577bfb0cf2900e6a5e23f6feebd3383db08 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:15:28 +0200 Subject: [PATCH 09/46] clean renaming --- drive/cache.go | 4 ++-- drive/drive.go | 5 ----- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/drive/cache.go b/drive/cache.go index 968e89a..71ed93d 100644 --- a/drive/cache.go +++ b/drive/cache.go @@ -259,8 +259,8 @@ func boltUpdateObject(tx *bolt.Tx, object *APIObject) error { if nil != prev { // Remove object ids from the index b := tx.Bucket(bParents) - for _, parent := range object.Parents { - b.Delete([]byte(parent + "/" + object.Name)) + for _, parent := range prev.Parents { + b.Delete([]byte(parent + "/" + prev.Name)) } } diff --git a/drive/drive.go b/drive/drive.go index 41bb67f..5b22e40 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -344,11 +344,6 @@ func (d *Client) Rename(object *APIObject, OldParent string, NewParent string, N } object.Parents = append(object.Parents, NewParent) - if err := d.cache.DeleteObject(object.ObjectID); nil != err { - Log.Debugf("%v", err) - return fmt.Errorf("Could not delete object %v (%v) from cache", object.ObjectID, object.Name) - } - if err := d.cache.UpdateObject(object); nil != err { Log.Debugf("%v", err) return fmt.Errorf("Could not rename object %v (%v) from cache", object.ObjectID, object.Name) From 07f050510995f4f1eda2b355aa6f9b095904ca8a Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:25:11 +0200 Subject: [PATCH 10/46] retry on 500 internal server error --- chunk/download.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/chunk/download.go b/chunk/download.go index a090305..205230e 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -74,7 +74,8 @@ func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte if strings.Contains(body, "dailyLimitExceeded") || strings.Contains(body, "userRateLimitExceeded") || strings.Contains(body, "rateLimitExceeded") || - strings.Contains(body, "backendError") { + strings.Contains(body, "backendError") || + strings.Contains(body, "internalError") { if 0 == delay { delay = 1 } else { From 5478263f76859ea5c3398f5eeaf1b7dbd191c062 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:42:15 +0200 Subject: [PATCH 11/46] adjusted readme --- README.md | 74 
++++++++++++++++--------------------------------------- 1 file changed, 21 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index 55efcec..e919371 100644 --- a/README.md +++ b/README.md @@ -15,13 +15,12 @@ I tried using rclone for a long time, but got API Quota errors every day and/or _If you like the project, feel free to make a small [donation via PayPal](https://www.paypal.me/dowei). Otherwise support the project by implementing new functions / bugfixes yourself and create pull requests :)_ ## Installation -1. First you need to install fuse and mongodb on your system +1. First you need to install fuse on your system 2. Then you should download the newest release from the [GitHub release page](https://github.com/dweidenfeld/plexdrive/releases). 3. Create your own client id and client secret (see [https://rclone.org/drive/#making-your-own-client-id](https://rclone.org/drive/#making-your-own-client-id)). 4. Sample command line for plexdrive ``` -./plexdrive -m localhost --clear-chunk-age=24h --chunk-load-ahead=4 --chunk-load-threads=8 -t /mnt/plexdrive-cache/ --config=/root/.plexdrive --refresh-interval=1m --fuse-options=allow_other /mnt/plexdrive - +./plexdrive -t /mnt/plexdrive-cache/ -c /root/.plexdrive -o allow_other /mnt/plexdrive ``` ### Crypted mount with rclone @@ -31,45 +30,37 @@ You can use [this tutorial](TUTORIAL.md) for instruction how to mount an encrypt ``` Usage of ./plexdrive: --chunk-load-ahead int - The number of chunks that should be read ahead (default 4) + The number of chunks that should be read ahead (default 2) + --chunk-load-retries int + Number of retries to load a chunk (default 3) --chunk-load-threads int - The number of threads to use for downloading chunks (default 8) + The number of threads to use for downloading chunks (default 4) + --chunk-load-timeout duration + Duration to wait for a chunk to be loaded (default 10s) --chunk-size string - The size of each chunk that is downloaded (units: B, K, M, G) (default "5M") - --clear-chunk-age duration - The maximum age of a cached chunk file (default 30m0s) - --clear-chunk-interval duration - The time to wait till clearing the chunk directory (default 1m0s) + The size of each chunk that is downloaded (units: B, K, M, G) (default "5M") -c, --config string - The path to the configuration directory (default "/root/.plexdrive") + The path to the configuration directory (default "~/.plexdrive") -o, --fuse-options string - Fuse mount options (e.g. -fuse-options allow_other,...) + Fuse mount options (e.g. -fuse-options allow_other,...) --gid int - Set the mounts GID (-1 = default permissions) (default -1) - --mongo-database string - MongoDB database (default "plexdrive") - -m, --mongo-host string - MongoDB host (default "localhost") - --mongo-password string - MongoDB password - --mongo-user string - MongoDB username + Set the mounts GID (-1 = default permissions) (default -1) + --max-chunks int + The maximum number of chunks to be stored on disk (default 10) --refresh-interval duration - The time to wait till checking for changes (default 5m0s) + The time to wait till checking for changes (default 5m0s) --root-node-id string - The ID of the root node to mount (use this for only mount a sub directory) (default "root") - --speed-limit string - This value limits the download speed, e.g. 
5M = 5MB/s per chunk (units: B, K, M, G) + The ID of the root node to mount (use this for only mount a sub directory) (default "root") -t, --temp string - Path to a temporary directory to store temporary data (default "/tmp") + Path to a temporary directory to store temporary data (default "/tmp") --uid int - Set the mounts UID (-1 = default permissions) (default -1) + Set the mounts UID (-1 = default permissions) (default -1) --umask value - Override the default file permissions + Override the default file permissions -v, --verbosity int - Set the log level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) + Set the log level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) --version - Displays program's version information + Displays program's version information ``` ### Support @@ -91,29 +82,6 @@ Feel free to ask configuration and setup questions here. * volume_name=myname * read_only -### Cache by usage -If you set the --clear-chunk-age to e.g. 24 hours your files will be stored -for 24 hours on your harddisk. This prevents you from downloading the file -everytime it is accessed so will have a faster playback start, avoid stuttering -and spare API calls. - -Everytime a file is accessed it will the caching time will be extended. -E.g. You access a file at 20:00, then it will be deleted on the next day at -20:00. If you access the file e.g. at 18:00 the next day, the file will be -deleted the day after at 18:00 and so on. - -If you activate the option `clear-chunk-max-size` you will automatically disable -the cache cleaning by time. So it will only delete the oldest chunk file when it -needs the space. - -**This function does not limit the storage to the given size**. It will only say -"if you reach the given limit, check if you can clean up old stuff". So if you have -of at most 60gb to be sure it will not override the 100gb limit. The implementation is -a limit of e.g. 100gb available for chunks, you should specify the clear-chunk-max-size -done that way, because a hard checking routine could make the playback unstable and -present buffering because the cleaning of the old chunks off the file system is a low -priority over streaming your files. 
- ### Root-Node-ID You can use the option `root-node-id` to specify a folder id that should be mounted as From e3c016a51a36214306ecad40bc4e15ae8c513f10 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 11:49:48 +0200 Subject: [PATCH 12/46] added docs --- chunk/download.go | 1 + chunk/manager.go | 4 +++- chunk/stack.go | 6 ++++++ chunk/storage.go | 13 +++++++++++-- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index 205230e..afc22d3 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -24,6 +24,7 @@ func NewDownloader(threads int, client *http.Client) (*Downloader, error) { return &manager, nil } +// Download starts a new download request func (d *Downloader) Download(req *Request) ([]byte, error) { return downloadFromAPI(d.Client, req, 0) } diff --git a/chunk/manager.go b/chunk/manager.go index a218a8c..412203d 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -25,6 +25,7 @@ type Manager struct { storage *Storage } +// Request represents a chunk request type Request struct { id string object *drive.APIObject @@ -85,6 +86,7 @@ func NewManager( return &manager, nil } +// GetChunk loads one chunk and starts the preload for the next chunks func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, error) { chunkOffset := offset % m.ChunkSize offsetStart := offset - chunkOffset @@ -116,7 +118,7 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, bytes, err := m.storage.Get(id, chunkOffset, size, m.Timeout) retryCount := 0 - for err == TIMEOUT && retryCount < m.TimeoutRetries { + for err == ErrTimeout && retryCount < m.TimeoutRetries { Log.Warningf("Timeout while requesting chunk %v. Retrying (%v / %v)", id, (retryCount + 1), m.TimeoutRetries) bytes, err = m.storage.Get(id, chunkOffset, size, m.Timeout) retryCount++ diff --git a/chunk/stack.go b/chunk/stack.go index 1bf2d8a..b9d9975 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ -5,17 +5,20 @@ import ( "sync" ) +// Stack is a thread safe list/stack implementation type Stack struct { items *list.List lock sync.Mutex } +// NewStack creates a new stack func NewStack() *Stack { return &Stack{ items: list.New(), } } +// Len returns the length of the current stack func (s *Stack) Len() int { s.lock.Lock() count := s.items.Len() @@ -23,6 +26,7 @@ func (s *Stack) Len() int { return count } +// Pop pops the first item from the stack func (s *Stack) Pop() string { s.lock.Lock() item := s.items.Front() @@ -36,6 +40,7 @@ func (s *Stack) Pop() string { return item.Value.(string) } +// Touch moves the specified item to the last position of the stack func (s *Stack) Touch(id string) { s.lock.Lock() for item := s.items.Front(); item != nil; item = item.Next() { @@ -47,6 +52,7 @@ func (s *Stack) Touch(id string) { s.lock.Unlock() } +// Push adds a new item to the last position of the stack func (s *Stack) Push(id string) { s.lock.Lock() for item := s.items.Front(); item != nil; item = item.Next() { diff --git a/chunk/storage.go b/chunk/storage.go index 51da437..b647d1b 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -15,8 +15,10 @@ import ( . 
"github.com/claudetech/loggo/default" ) -var TIMEOUT = errors.New("timeout") +// ErrTimeout is a timeout error +var ErrTimeout = errors.New("timeout") +// Storage is a chunk storage type Storage struct { ChunkPath string ChunkSize int64 @@ -29,11 +31,13 @@ type Storage struct { stack *Stack } +// Item represents a chunk in RAM type Item struct { id string bytes []byte } +// NewStorage creates a new storage func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { storage := Storage{ ChunkPath: chunkPath, @@ -50,6 +54,7 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { return &storage } +// Clear removes all old chunks on disk (will be called on each program start) func (s *Storage) Clear() error { if err := os.RemoveAll(s.ChunkPath); nil != err { return fmt.Errorf("Could not clear old chunks from disk") @@ -57,6 +62,7 @@ func (s *Storage) Clear() error { return nil } +// ExistsOrCreate check if an item already exists, otherwise it will create a placeholder func (s *Storage) ExistsOrCreate(id string) bool { s.tocLock.Lock() if _, exists := s.toc[id]; exists { @@ -68,6 +74,7 @@ func (s *Storage) ExistsOrCreate(id string) bool { return false } +// Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { s.chunksLock.Lock() s.chunks[id] = bytes @@ -81,12 +88,14 @@ func (s *Storage) Store(id string, bytes []byte) error { return nil } +// Error is called to remove an item from the index if there has been an issue downloading the chunk func (s *Storage) Error(id string, err error) { s.tocLock.Lock() s.toc[id] = err s.tocLock.Unlock() } +// Get gets a chunk content (blocking) func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { res := make(chan []byte) ec := make(chan error) @@ -131,7 +140,7 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b return nil, err case <-time.After(timeout): s.deleteFromToc(id) - return nil, TIMEOUT + return nil, ErrTimeout } } From 90b37e88b6c34af26628bd79c48c471e0a61c6c0 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 12:00:12 +0200 Subject: [PATCH 13/46] upgraded to 5.x --- ci/meta/version | 2 +- ci/scripts/compile.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/meta/version b/ci/meta/version index e2bca59..28cbf7c 100644 --- a/ci/meta/version +++ b/ci/meta/version @@ -1 +1 @@ -4.1.0-beta \ No newline at end of file +5.0.0 \ No newline at end of file diff --git a/ci/scripts/compile.sh b/ci/scripts/compile.sh index e705f66..ed45643 100755 --- a/ci/scripts/compile.sh +++ b/ci/scripts/compile.sh @@ -9,7 +9,7 @@ export TS=$(date +%s) cd $GOPATH/src/github.com/dweidenfeld/plexdrive # Version -export VERSION="$(cat ci/meta/version)-$TS" +export VERSION="$(cat ci/meta/version)-beta.$TS" echo "Got version $VERSION" sed -i.bak s/%VERSION%/$VERSION/g main.go From 05e2a945084b444f56601948d09f1c5e4c3dc0c5 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 13:14:45 +0200 Subject: [PATCH 14/46] shrink binaries --- ci/scripts/go-build-all | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/ci/scripts/go-build-all b/ci/scripts/go-build-all index 39e92df..6b91ee5 100755 --- a/ci/scripts/go-build-all +++ b/ci/scripts/go-build-all @@ -63,6 +63,8 @@ PLATFORMS="$PLATFORMS freebsd/amd64" # PLATFORMS_ARM="linux" +FLAGS="-ldflags='-s -w'" + ############################################################## # 
Shouldn't really need to modify anything below this line. # ############################################################## @@ -80,14 +82,14 @@ for PLATFORM in $PLATFORMS; do GOARCH=${PLATFORM#*/} BIN_FILENAME="${OUTPUT}-${GOOS}-${GOARCH}" if [[ "${GOOS}" == "windows" ]]; then BIN_FILENAME="${BIN_FILENAME}.exe"; fi - CMD="GOOS=${GOOS} GOARCH=${GOARCH} go build -o ${BIN_FILENAME} $@" + CMD="GOOS=${GOOS} GOARCH=${GOARCH} go build ${FLAGS} -o ${BIN_FILENAME} $@" echo "${CMD}" eval $CMD || FAILURES="${FAILURES} ${PLATFORM}" done # ARM builds if [[ $PLATFORMS_ARM == *"linux"* ]]; then - CMD="GOOS=linux GOARCH=arm64 go build -o ${OUTPUT}-linux-arm64 $@" + CMD="GOOS=linux GOARCH=arm64 go build ${FLAGS} -o ${OUTPUT}-linux-arm64 $@" echo "${CMD}" eval $CMD || FAILURES="${FAILURES} ${PLATFORM}" fi @@ -96,7 +98,7 @@ for GOOS in $PLATFORMS_ARM; do # build for each ARM version for GOARM in 7 6 5; do BIN_FILENAME="${OUTPUT}-${GOOS}-${GOARCH}${GOARM}" - CMD="GOARM=${GOARM} GOOS=${GOOS} GOARCH=${GOARCH} go build -o ${BIN_FILENAME} $@" + CMD="GOARM=${GOARM} GOOS=${GOOS} GOARCH=${GOARCH} go build ${FLAGS} -o ${BIN_FILENAME} $@" echo "${CMD}" eval "${CMD}" || FAILURES="${FAILURES} ${GOOS}/${GOARCH}${GOARM}" done From df6c9a0d10891bdc76f27460021726818f449d66 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 30 Jun 2017 17:39:51 +0200 Subject: [PATCH 15/46] deactivated mips build --- ci/scripts/go-build-all | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/scripts/go-build-all b/ci/scripts/go-build-all index 6b91ee5..784eb19 100755 --- a/ci/scripts/go-build-all +++ b/ci/scripts/go-build-all @@ -39,7 +39,7 @@ PLATFORMS="darwin/amd64 darwin/386" # amd64 only as of go1.5 # PLATFORMS="$PLATFORMS windows/amd64 windows/386" # arm compilation not available for Windows PLATFORMS="$PLATFORMS linux/amd64 linux/386" PLATFORMS="$PLATFORMS linux/ppc64 linux/ppc64le" -PLATFORMS="$PLATFORMS linux/mips64 linux/mips64le" # experimental in go1.6 +# PLATFORMS="$PLATFORMS linux/mips64 linux/mips64le" # experimental in go1.6 PLATFORMS="$PLATFORMS freebsd/amd64" # PLATFORMS="$PLATFORMS netbsd/amd64" # amd64 only as of go1.6 # PLATFORMS="$PLATFORMS openbsd/amd64" # amd64 only as of go1.6 From d02a4eeeef7fc3efd49a48d3d2d01512c34eb076 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Wed, 5 Jul 2017 08:38:45 +0200 Subject: [PATCH 16/46] check if file exists before trying to open it --- chunk/storage.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/chunk/storage.go b/chunk/storage.go index b647d1b..977b3d5 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -175,6 +175,10 @@ func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) { filename := filepath.Join(s.ChunkPath, id) + if _, err := os.Stat(filename); os.IsExist(err) { + return nil, false + } + f, err := os.Open(filename) if nil != err { Log.Tracef("%v", err) From bbe336471c3dc82f3eb07b321a3b694a5b72e786 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Wed, 5 Jul 2017 13:20:08 +0200 Subject: [PATCH 17/46] thread activity montoring --- chunk/download.go | 2 -- chunk/manager.go | 13 ++++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index afc22d3..66efc51 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -35,8 +35,6 @@ func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte time.Sleep(time.Duration(delay) * time.Second) } - 
Log.Debugf("Requesting object %v (%v) bytes %v - %v from API (preload: %v)", - request.object.ObjectID, request.object.Name, request.offsetStart, request.offsetEnd, request.preload) req, err := http.NewRequest("GET", request.object.DownloadURL, nil) if nil != err { Log.Debugf("%v", err) diff --git a/chunk/manager.go b/chunk/manager.go index 412203d..dcff9b6 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -80,7 +80,7 @@ func NewManager( } for i := 0; i < threads; i++ { - go manager.thread() + go manager.thread(i) } return &manager, nil @@ -126,14 +126,14 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, return bytes, err } -func (m *Manager) thread() { +func (m *Manager) thread(threadID int) { for { select { case req := <-m.queue: - m.checkChunk(req) + m.checkChunk(req, threadID) break case req := <-m.preloadQueue: - m.checkChunk(req) + m.checkChunk(req, threadID) break default: time.Sleep(10 * time.Millisecond) @@ -141,11 +141,14 @@ func (m *Manager) thread() { } } -func (m *Manager) checkChunk(req *Request) { +func (m *Manager) checkChunk(req *Request, threadID int) { if m.storage.ExistsOrCreate(req.id) { return } + Log.Debugf("Requesting object %v (%v) bytes %v - %v from API (preload: %v | thread: %v)", + req.object.ObjectID, req.object.Name, req.offsetStart, req.offsetEnd, req.preload, threadID) + bytes, err := m.downloader.Download(req) if nil != err { Log.Warningf("%v", err) From 3707c58a3c63934b8a314f2288b138adf13b209c Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 7 Jul 2017 07:18:08 +0200 Subject: [PATCH 18/46] removed high prio queue --- chunk/manager.go | 17 +++-------------- chunk/storage.go | 2 -- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index dcff9b6..e77c7e0 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -21,7 +21,6 @@ type Manager struct { TimeoutRetries int downloader *Downloader queue chan *Request - preloadQueue chan *Request storage *Storage } @@ -70,8 +69,7 @@ func NewManager( Timeout: timeout, TimeoutRetries: timeoutRetries, downloader: downloader, - queue: make(chan *Request, 100), - preloadQueue: make(chan *Request, 100), + queue: make(chan *Request, 500), storage: NewStorage(chunkPath, chunkSize, maxChunks), } @@ -106,7 +104,7 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, aheadOffsetEnd := aheadOffsetStart + m.ChunkSize if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) - m.preloadQueue <- &Request{ + m.queue <- &Request{ id: id, object: object, offsetStart: aheadOffsetStart, @@ -128,16 +126,7 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, func (m *Manager) thread(threadID int) { for { - select { - case req := <-m.queue: - m.checkChunk(req, threadID) - break - case req := <-m.preloadQueue: - m.checkChunk(req, threadID) - break - default: - time.Sleep(10 * time.Millisecond) - } + m.checkChunk(<-m.queue, threadID) } } diff --git a/chunk/storage.go b/chunk/storage.go index 977b3d5..793fe70 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -127,8 +127,6 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b close(res) return } - - time.Sleep(10 * time.Millisecond) } }() From 359459de3681a68fc29e72f364e061e86a0ee864 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 7 Jul 2017 07:27:06 +0200 Subject: [PATCH 19/46] disabled timeout / 
changed file reading logic --- chunk/storage.go | 56 +++++++++++++++--------------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/chunk/storage.go b/chunk/storage.go index 793fe70..ea30386 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -97,48 +97,26 @@ func (s *Storage) Error(id string, err error) { // Get gets a chunk content (blocking) func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { - res := make(chan []byte) - ec := make(chan error) + // TODO: use timeout - go func() { - for { - s.tocLock.Lock() - err, exists := s.toc[id] - s.tocLock.Unlock() - if nil == err && exists { - bytes, exists := s.loadFromRAM(id, offset, size) - if exists { - res <- bytes - close(ec) - close(res) - return - } - - bytes, exists = s.loadFromDisk(id, offset, size) - if exists { - res <- bytes - close(ec) - close(res) - return - } - } else if nil != err { - ec <- err - close(ec) - close(res) - return + for { + s.tocLock.Lock() + err, exists := s.toc[id] + s.tocLock.Unlock() + if nil == err && exists { + bytes, exists := s.loadFromRAM(id, offset, size) + if exists { + return bytes, nil + } + + bytes, exists = s.loadFromDisk(id, offset, size) + if exists { + return bytes, nil } + } else if nil != err { + s.deleteFromToc(id) + return nil, err } - }() - - select { - case r := <-res: - return r, nil - case err := <-ec: - s.deleteFromToc(id) - return nil, err - case <-time.After(timeout): - s.deleteFromToc(id) - return nil, ErrTimeout } } From 5aa93b6d8fad4dff16c96ba54b88543c4ce64c73 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 7 Jul 2017 07:32:43 +0200 Subject: [PATCH 20/46] check file on demand --- chunk/storage.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/chunk/storage.go b/chunk/storage.go index ea30386..97c4d98 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -151,10 +151,6 @@ func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) { filename := filepath.Join(s.ChunkPath, id) - if _, err := os.Stat(filename); os.IsExist(err) { - return nil, false - } - f, err := os.Open(filename) if nil != err { Log.Tracef("%v", err) From d1ecb4a2b8892123720e13943519ae2cc7ec43ce Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 7 Jul 2017 11:26:53 +0200 Subject: [PATCH 21/46] chunk existence check is handled in main thread --- chunk/manager.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index e77c7e0..c39afd5 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -91,12 +91,14 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, offsetEnd := offsetStart + m.ChunkSize id := fmt.Sprintf("%v:%v", object.ObjectID, offsetStart) - m.queue <- &Request{ - id: id, - object: object, - offsetStart: offsetStart, - offsetEnd: offsetEnd, - preload: false, + if !m.storage.ExistsOrCreate(id) { + m.queue <- &Request{ + id: id, + object: object, + offsetStart: offsetStart, + offsetEnd: offsetEnd, + preload: false, + } } for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { @@ -104,12 +106,14 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, aheadOffsetEnd := aheadOffsetStart + m.ChunkSize if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) - 
m.queue <- &Request{ - id: id, - object: object, - offsetStart: aheadOffsetStart, - offsetEnd: aheadOffsetEnd, - preload: true, + if !m.storage.ExistsOrCreate(id) { + m.queue <- &Request{ + id: id, + object: object, + offsetStart: aheadOffsetStart, + offsetEnd: aheadOffsetEnd, + preload: true, + } } } } @@ -131,10 +135,6 @@ func (m *Manager) thread(threadID int) { } func (m *Manager) checkChunk(req *Request, threadID int) { - if m.storage.ExistsOrCreate(req.id) { - return - } - Log.Debugf("Requesting object %v (%v) bytes %v - %v from API (preload: %v | thread: %v)", req.object.ObjectID, req.object.Name, req.offsetStart, req.offsetEnd, req.preload, threadID) From 894ae3a63e9df4aa2bd703dd93166bfa95dbe875 Mon Sep 17 00:00:00 2001 From: zenjabba Date: Fri, 7 Jul 2017 14:48:49 -0400 Subject: [PATCH 22/46] Update storage.go https://github.com/dweidenfeld/plexdrive/issues/152#issuecomment-313754559 --- chunk/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chunk/storage.go b/chunk/storage.go index 97c4d98..d2ce9bc 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -163,7 +163,7 @@ func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) { if n > 0 && (nil == err || io.EOF == err || io.ErrUnexpectedEOF == err) { s.stack.Touch(id) - eOffset := int64(math.Min(float64(size), float64(len(buf)))) + eOffset := int64(math.Min(float64(size), float64(n))) return buf[:eOffset], true } From 07af9e73db11a414067ea0025f95c5dddf830cfe Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 7 Jul 2017 21:15:01 +0200 Subject: [PATCH 23/46] configurable location of cache file --- drive/cache.go | 6 +++--- main.go | 9 ++++++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/drive/cache.go b/drive/cache.go index 71ed93d..7ce3ea9 100644 --- a/drive/cache.go +++ b/drive/cache.go @@ -46,10 +46,10 @@ type PageToken struct { } // NewCache creates a new cache instance -func NewCache(cacheBasePath string, sqlDebug bool) (*Cache, error) { +func NewCache(cacheFile, configPath string, sqlDebug bool) (*Cache, error) { Log.Debugf("Opening cache connection") - db, err := bolt.Open(filepath.Join(cacheBasePath, "cache.bolt"), 0600, nil) + db, err := bolt.Open(cacheFile, 0600, nil) if nil != err { Log.Debugf("%v", err) return nil, fmt.Errorf("Could not open cache file") @@ -57,7 +57,7 @@ func NewCache(cacheBasePath string, sqlDebug bool) (*Cache, error) { cache := Cache{ db: db, - tokenPath: filepath.Join(cacheBasePath, "token.json"), + tokenPath: filepath.Join(configPath, "token.json"), } // Make sure the necessary buckets exist diff --git a/main.go b/main.go index 1a0c076..cadbe97 100644 --- a/main.go +++ b/main.go @@ -39,6 +39,7 @@ func main() { argRootNodeID := flag.String("root-node-id", "root", "The ID of the root node to mount (use this for only mount a sub directory)") argConfigPath := flag.StringP("config", "c", filepath.Join(user.HomeDir, ".plexdrive"), "The path to the configuration directory") argTempPath := flag.StringP("temp", "t", os.TempDir(), "Path to a temporary directory to store temporary data") + argCacheFile := flag.String("cache-file", filepath.Join(user.HomeDir, ".plexdrive", "cache.bolt"), "Path the the cache file") argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that 
should be read ahead") @@ -110,6 +111,7 @@ func main() { Log.Debugf("root-node-id : %v", *argRootNodeID) Log.Debugf("config : %v", *argConfigPath) Log.Debugf("temp : %v", *argTempPath) + Log.Debugf("cache-file : %v", *argCacheFile) Log.Debugf("chunk-size : %v", *argChunkSize) Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) @@ -130,6 +132,11 @@ func main() { Log.Debugf("%v", err) os.Exit(1) } + if err := os.MkdirAll(filepath.Dir(*argCacheFile), 0766); nil != err { + Log.Errorf("Could not create cache file directory") + Log.Debugf("%v", err) + os.Exit(1) + } chunkPath := filepath.Join(*argTempPath, "chunks") // set the global buffer configuration @@ -151,7 +158,7 @@ func main() { } } - cache, err := drive.NewCache(*argConfigPath, *argLogLevel > 3) + cache, err := drive.NewCache(*argCacheFile, *argConfigPath, *argLogLevel > 3) if nil != err { Log.Errorf("%v", err) os.Exit(4) From a8567f579033024d8a479b72a7466459974a38b1 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Sat, 8 Jul 2017 08:54:18 +0200 Subject: [PATCH 24/46] timeout / performance queue --- chunk/manager.go | 13 ++++++++++--- chunk/storage.go | 5 +++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index c39afd5..898dc4f 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -21,6 +21,7 @@ type Manager struct { TimeoutRetries int downloader *Downloader queue chan *Request + queue2 chan *Request storage *Storage } @@ -69,7 +70,8 @@ func NewManager( Timeout: timeout, TimeoutRetries: timeoutRetries, downloader: downloader, - queue: make(chan *Request, 500), + queue: make(chan *Request, 20), + queue2: make(chan *Request, 80), storage: NewStorage(chunkPath, chunkSize, maxChunks), } @@ -107,7 +109,7 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) if !m.storage.ExistsOrCreate(id) { - m.queue <- &Request{ + m.queue2 <- &Request{ id: id, object: object, offsetStart: aheadOffsetStart, @@ -130,7 +132,12 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, func (m *Manager) thread(threadID int) { for { - m.checkChunk(<-m.queue, threadID) + select { + case req := <-m.queue: + m.checkChunk(req, threadID) + case req := <-m.queue2: + m.checkChunk(req, threadID) + } } } diff --git a/chunk/storage.go b/chunk/storage.go index d2ce9bc..b6b33b3 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -97,8 +97,7 @@ func (s *Storage) Error(id string, err error) { // Get gets a chunk content (blocking) func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { - // TODO: use timeout - + start := time.Now() for { s.tocLock.Lock() err, exists := s.toc[id] @@ -116,6 +115,8 @@ func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]b } else if nil != err { s.deleteFromToc(id) return nil, err + } else if time.Now().Sub(start) > timeout { + return nil, ErrTimeout } } } From fe581c8364fa92ebb132a0059c09cf31f879bcdc Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Mon, 10 Jul 2017 19:25:12 +0200 Subject: [PATCH 25/46] use rw locking --- chunk/stack.go | 6 +++--- chunk/storage.go | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/chunk/stack.go b/chunk/stack.go index b9d9975..ffda4ee 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ 
-8,7 +8,7 @@ import ( // Stack is a thread safe list/stack implementation type Stack struct { items *list.List - lock sync.Mutex + lock sync.RWMutex } // NewStack creates a new stack @@ -20,9 +20,9 @@ func NewStack() *Stack { // Len returns the length of the current stack func (s *Stack) Len() int { - s.lock.Lock() + s.lock.RLock() count := s.items.Len() - s.lock.Unlock() + s.lock.RUnlock() return count } diff --git a/chunk/storage.go b/chunk/storage.go index b6b33b3..97aacb3 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -25,9 +25,9 @@ type Storage struct { MaxChunks int queue chan *Item chunks map[string][]byte - chunksLock sync.Mutex + chunksLock sync.RWMutex toc map[string]error - tocLock sync.Mutex + tocLock sync.RWMutex stack *Stack } @@ -99,9 +99,9 @@ func (s *Storage) Error(id string, err error) { func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { start := time.Now() for { - s.tocLock.Lock() + s.tocLock.RLock() err, exists := s.toc[id] - s.tocLock.Unlock() + s.tocLock.RUnlock() if nil == err && exists { bytes, exists := s.loadFromRAM(id, offset, size) if exists { @@ -137,9 +137,9 @@ func (s *Storage) deleteFromToc(id string) { } func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { - s.chunksLock.Lock() + s.chunksLock.RLock() bytes, exists := s.chunks[id] - s.chunksLock.Unlock() + s.chunksLock.RUnlock() if !exists { return nil, false } From 5d753433eee18c0ad8948c9148f2f9c30309b040 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Mon, 10 Jul 2017 19:35:31 +0200 Subject: [PATCH 26/46] dedicated downloader --- chunk/download.go | 7 ++++--- chunk/manager.go | 7 ++++--- chunk/storage.go | 2 +- main.go | 2 +- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index 66efc51..3e79ed9 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -8,15 +8,16 @@ import ( "time" . "github.com/claudetech/loggo/default" + "github.com/dweidenfeld/plexdrive/drive" ) // Downloader handles concurrent chunk downloads type Downloader struct { - Client *http.Client + Client *drive.Client } // NewDownloader creates a new download manager -func NewDownloader(threads int, client *http.Client) (*Downloader, error) { +func NewDownloader(threads int, client *drive.Client) (*Downloader, error) { manager := Downloader{ Client: client, } @@ -26,7 +27,7 @@ func NewDownloader(threads int, client *http.Client) (*Downloader, error) { // Download starts a new download request func (d *Downloader) Download(req *Request) ([]byte, error) { - return downloadFromAPI(d.Client, req, 0) + return downloadFromAPI(d.Client.GetNativeClient(), req, 0) } func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte, error) { diff --git a/chunk/manager.go b/chunk/manager.go index 898dc4f..351e445 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -3,8 +3,6 @@ package chunk import ( "fmt" - "net/http" - . 
"github.com/claudetech/loggo/default" "time" @@ -40,7 +38,7 @@ func NewManager( chunkSize int64, loadAhead, threads int, - client *http.Client, + client *drive.Client, maxChunks int, timeout time.Duration, timeoutRetries int) (*Manager, error) { @@ -127,6 +125,9 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, bytes, err = m.storage.Get(id, chunkOffset, size, m.Timeout) retryCount++ } + if nil != err { + m.storage.Error(id, err) + } return bytes, err } diff --git a/chunk/storage.go b/chunk/storage.go index 97aacb3..c2130e8 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -65,7 +65,7 @@ func (s *Storage) Clear() error { // ExistsOrCreate check if an item already exists, otherwise it will create a placeholder func (s *Storage) ExistsOrCreate(id string) bool { s.tocLock.Lock() - if _, exists := s.toc[id]; exists { + if err, exists := s.toc[id]; exists && nil != err { s.tocLock.Unlock() return true } diff --git a/main.go b/main.go index cadbe97..07e9671 100644 --- a/main.go +++ b/main.go @@ -176,7 +176,7 @@ func main() { chunkSize, *argChunkLoadAhead, *argChunkLoadThreads, - client.GetNativeClient(), + client, *argMaxChunks, *argChunkLoadTimeout, *argChunkLoadRetries) From 00eff01e7ef2eabc6c948993c6893b5738e7da49 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Mon, 10 Jul 2017 19:37:17 +0200 Subject: [PATCH 27/46] bugfix --- chunk/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chunk/storage.go b/chunk/storage.go index c2130e8..1966784 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -65,7 +65,7 @@ func (s *Storage) Clear() error { // ExistsOrCreate check if an item already exists, otherwise it will create a placeholder func (s *Storage) ExistsOrCreate(id string) bool { s.tocLock.Lock() - if err, exists := s.toc[id]; exists && nil != err { + if err, exists := s.toc[id]; exists && nil == err { s.tocLock.Unlock() return true } From ae507858850914d5ee28c3c277c0655d6d3e25fb Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Wed, 12 Jul 2017 07:41:29 +0200 Subject: [PATCH 28/46] refactoring --- chunk/manager.go | 75 ++++++++----- chunk/storage.go | 286 ++++++++++++++++++++--------------------------- 2 files changed, 172 insertions(+), 189 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index 351e445..34b36ec 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -2,6 +2,7 @@ package chunk import ( "fmt" + "math" . 
"github.com/claudetech/loggo/default" @@ -30,6 +31,13 @@ type Request struct { preload bool offsetStart int64 offsetEnd int64 + response chan Response +} + +// Response represents a chunk responses +type Response struct { + err error + bytes []byte } // NewManager creates a new chunk manager @@ -91,14 +99,14 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, offsetEnd := offsetStart + m.ChunkSize id := fmt.Sprintf("%v:%v", object.ObjectID, offsetStart) - if !m.storage.ExistsOrCreate(id) { - m.queue <- &Request{ - id: id, - object: object, - offsetStart: offsetStart, - offsetEnd: offsetEnd, - preload: false, - } + response := make(chan Response) + m.queue <- &Request{ + id: id, + object: object, + offsetStart: offsetStart, + offsetEnd: offsetEnd, + preload: false, + response: response, } for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { @@ -106,29 +114,24 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, aheadOffsetEnd := aheadOffsetStart + m.ChunkSize if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) - if !m.storage.ExistsOrCreate(id) { - m.queue2 <- &Request{ - id: id, - object: object, - offsetStart: aheadOffsetStart, - offsetEnd: aheadOffsetEnd, - preload: true, - } + m.queue2 <- &Request{ + id: id, + object: object, + offsetStart: aheadOffsetStart, + offsetEnd: aheadOffsetEnd, + preload: true, } } } - bytes, err := m.storage.Get(id, chunkOffset, size, m.Timeout) - retryCount := 0 - for err == ErrTimeout && retryCount < m.TimeoutRetries { - Log.Warningf("Timeout while requesting chunk %v. Retrying (%v / %v)", id, (retryCount + 1), m.TimeoutRetries) - bytes, err = m.storage.Get(id, chunkOffset, size, m.Timeout) - retryCount++ + res := <-response + if nil != res.err { + return nil, res.err } - if nil != err { - m.storage.Error(id, err) - } - return bytes, err + + sOffset := int64(math.Min(float64(len(res.bytes)), float64(chunkOffset))) + eOffset := int64(math.Min(float64(len(res.bytes)), float64(chunkOffset+size))) + return res.bytes[sOffset:eOffset], nil } func (m *Manager) thread(threadID int) { @@ -143,13 +146,31 @@ func (m *Manager) thread(threadID int) { } func (m *Manager) checkChunk(req *Request, threadID int) { + if chunk, exists := m.storage.LoadOrCreate(req.id); exists { + if nil != req.response { + req.response <- Response{bytes: chunk} + close(req.response) + } + return + } + Log.Debugf("Requesting object %v (%v) bytes %v - %v from API (preload: %v | thread: %v)", req.object.ObjectID, req.object.Name, req.offsetStart, req.offsetEnd, req.preload, threadID) bytes, err := m.downloader.Download(req) if nil != err { Log.Warningf("%v", err) - m.storage.Error(req.id, err) + m.storage.Error(req.id) + + if nil != req.response { + req.response <- Response{err: err} + close(req.response) + } + } + + if nil != req.response { + req.response <- Response{bytes: bytes} + close(req.response) } if err := m.storage.Store(req.id, bytes); nil != err { diff --git a/chunk/storage.go b/chunk/storage.go index 1966784..8a19978 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -3,16 +3,9 @@ package chunk import ( "errors" "fmt" - "io" - "io/ioutil" - "math" "os" - "path/filepath" "sync" - - "time" - - . "github.com/claudetech/loggo/default" + // . 
"github.com/claudetech/loggo/default" ) // ErrTimeout is a timeout error @@ -20,15 +13,15 @@ var ErrTimeout = errors.New("timeout") // Storage is a chunk storage type Storage struct { - ChunkPath string - ChunkSize int64 - MaxChunks int - queue chan *Item - chunks map[string][]byte - chunksLock sync.RWMutex - toc map[string]error - tocLock sync.RWMutex - stack *Stack + ChunkPath string + ChunkSize int64 + MaxChunks int + // queue chan *Item + chunks map[string][]byte + lock sync.RWMutex + // toc map[string]error + // tocLock sync.RWMutex + // stack *Stack } // Item represents a chunk in RAM @@ -43,13 +36,13 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { ChunkPath: chunkPath, ChunkSize: chunkSize, MaxChunks: maxChunks, - queue: make(chan *Item, 100), - chunks: make(map[string][]byte), - toc: make(map[string]error), - stack: NewStack(), + // queue: make(chan *Item, 100), + chunks: make(map[string][]byte), + // toc: make(map[string]error), + // stack: NewStack(), } - go storage.thread() + // go storage.thread() return &storage } @@ -62,156 +55,125 @@ func (s *Storage) Clear() error { return nil } -// ExistsOrCreate check if an item already exists, otherwise it will create a placeholder -func (s *Storage) ExistsOrCreate(id string) bool { - s.tocLock.Lock() - if err, exists := s.toc[id]; exists && nil == err { - s.tocLock.Unlock() - return true +// LoadOrCreate loads a chunk from ram or creates it +func (s *Storage) LoadOrCreate(id string) ([]byte, bool) { + s.lock.Lock() + if chunk, exists := s.chunks[id]; exists { + s.lock.Unlock() + return chunk, true } - s.toc[id] = nil - s.tocLock.Unlock() - return false + s.chunks[id] = nil + s.lock.Unlock() + return nil, false } // Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { - s.chunksLock.Lock() + s.lock.Lock() s.chunks[id] = bytes - s.chunksLock.Unlock() - - s.queue <- &Item{ - id: id, - bytes: bytes, - } + s.lock.Unlock() return nil } // Error is called to remove an item from the index if there has been an issue downloading the chunk -func (s *Storage) Error(id string, err error) { - s.tocLock.Lock() - s.toc[id] = err - s.tocLock.Unlock() -} - -// Get gets a chunk content (blocking) -func (s *Storage) Get(id string, offset, size int64, timeout time.Duration) ([]byte, error) { - start := time.Now() - for { - s.tocLock.RLock() - err, exists := s.toc[id] - s.tocLock.RUnlock() - if nil == err && exists { - bytes, exists := s.loadFromRAM(id, offset, size) - if exists { - return bytes, nil - } - - bytes, exists = s.loadFromDisk(id, offset, size) - if exists { - return bytes, nil - } - } else if nil != err { - s.deleteFromToc(id) - return nil, err - } else if time.Now().Sub(start) > timeout { - return nil, ErrTimeout - } - } -} - -func (s *Storage) thread() { - for { - item := <-s.queue - if err := s.storeToDisk(item.id, item.bytes); nil != err { - Log.Warningf("%v", err) - } - } -} - -func (s *Storage) deleteFromToc(id string) { - s.tocLock.Lock() - delete(s.toc, id) - s.tocLock.Unlock() -} - -func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { - s.chunksLock.RLock() - bytes, exists := s.chunks[id] - s.chunksLock.RUnlock() - if !exists { - return nil, false - } - - sOffset := int64(math.Min(float64(len(bytes)), float64(offset))) - eOffset := int64(math.Min(float64(len(bytes)), float64(offset+size))) - return bytes[sOffset:eOffset], true -} - -func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) 
{ - filename := filepath.Join(s.ChunkPath, id) - - f, err := os.Open(filename) - if nil != err { - Log.Tracef("%v", err) - return nil, false - } - defer f.Close() - - buf := make([]byte, size) - n, err := f.ReadAt(buf, offset) - if n > 0 && (nil == err || io.EOF == err || io.ErrUnexpectedEOF == err) { - s.stack.Touch(id) - - eOffset := int64(math.Min(float64(size), float64(n))) - return buf[:eOffset], true - } - - Log.Tracef("%v", err) - return nil, false -} - -func (s *Storage) storeToDisk(id string, bytes []byte) error { - filename := filepath.Join(s.ChunkPath, id) - - if s.stack.Len() >= s.MaxChunks { - deleteID := s.stack.Pop() - - if "" != deleteID { - filename := filepath.Join(s.ChunkPath, deleteID) - - Log.Debugf("Deleting chunk %v", filename) - if err := os.Remove(filename); nil != err { - Log.Debugf("%v", err) - Log.Warningf("Could not delete chunk %v", filename) - } - - s.tocLock.Lock() - delete(s.toc, deleteID) - s.tocLock.Unlock() - } - } - - if _, err := os.Stat(s.ChunkPath); os.IsNotExist(err) { - if err := os.MkdirAll(s.ChunkPath, 0777); nil != err { - Log.Debugf("%v", err) - return fmt.Errorf("Could not create chunk temp path %v", s.ChunkPath) - } - } - - if _, err := os.Stat(filename); os.IsNotExist(err) { - if err := ioutil.WriteFile(filename, bytes, 0777); nil != err { - Log.Debugf("%v", err) - return fmt.Errorf("Could not write chunk temp file %v", filename) - } - - s.stack.Push(id) - } - - s.chunksLock.Lock() +func (s *Storage) Error(id string) { + s.lock.Lock() delete(s.chunks, id) - s.chunksLock.Unlock() - - return nil + s.lock.Unlock() } + +// func (s *Storage) thread() { +// for { +// item := <-s.queue +// if err := s.storeToDisk(item.id, item.bytes); nil != err { +// Log.Warningf("%v", err) +// } +// } +// } + +// func (s *Storage) deleteFromToc(id string) { +// s.tocLock.Lock() +// delete(s.toc, id) +// s.tocLock.Unlock() +// } + +// func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { +// s.chunksLock.RLock() +// bytes, exists := s.chunks[id] +// s.chunksLock.RUnlock() +// if !exists { +// return nil, false +// } + +// sOffset := int64(math.Min(float64(len(bytes)), float64(offset))) +// eOffset := int64(math.Min(float64(len(bytes)), float64(offset+size))) +// return bytes[sOffset:eOffset], true +// } + +// func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) { +// filename := filepath.Join(s.ChunkPath, id) + +// f, err := os.Open(filename) +// if nil != err { +// Log.Tracef("%v", err) +// return nil, false +// } +// defer f.Close() + +// buf := make([]byte, size) +// n, err := f.ReadAt(buf, offset) +// if n > 0 && (nil == err || io.EOF == err || io.ErrUnexpectedEOF == err) { +// s.stack.Touch(id) + +// eOffset := int64(math.Min(float64(size), float64(n))) +// return buf[:eOffset], true +// } + +// Log.Tracef("%v", err) +// return nil, false +// } + +// func (s *Storage) storeToDisk(id string, bytes []byte) error { +// filename := filepath.Join(s.ChunkPath, id) + +// if s.stack.Len() >= s.MaxChunks { +// deleteID := s.stack.Pop() + +// if "" != deleteID { +// filename := filepath.Join(s.ChunkPath, deleteID) + +// Log.Debugf("Deleting chunk %v", filename) +// if err := os.Remove(filename); nil != err { +// Log.Debugf("%v", err) +// Log.Warningf("Could not delete chunk %v", filename) +// } + +// s.tocLock.Lock() +// delete(s.toc, deleteID) +// s.tocLock.Unlock() +// } +// } + +// if _, err := os.Stat(s.ChunkPath); os.IsNotExist(err) { +// if err := os.MkdirAll(s.ChunkPath, 0777); nil != err { +// Log.Debugf("%v", 
err) +// return fmt.Errorf("Could not create chunk temp path %v", s.ChunkPath) +// } +// } + +// if _, err := os.Stat(filename); os.IsNotExist(err) { +// if err := ioutil.WriteFile(filename, bytes, 0777); nil != err { +// Log.Debugf("%v", err) +// return fmt.Errorf("Could not write chunk temp file %v", filename) +// } + +// s.stack.Push(id) +// } + +// s.chunksLock.Lock() +// delete(s.chunks, id) +// s.chunksLock.Unlock() + +// return nil +// } From 51d5bec472b9a5e66f0dd061d9d17d33b7a975ee Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Wed, 12 Jul 2017 22:32:19 +0200 Subject: [PATCH 29/46] added chunk removal --- chunk/storage.go | 124 ++++++----------------------------------------- 1 file changed, 16 insertions(+), 108 deletions(-) diff --git a/chunk/storage.go b/chunk/storage.go index 8a19978..4d79000 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -5,7 +5,8 @@ import ( "fmt" "os" "sync" - // . "github.com/claudetech/loggo/default" + + . "github.com/claudetech/loggo/default" ) // ErrTimeout is a timeout error @@ -16,12 +17,9 @@ type Storage struct { ChunkPath string ChunkSize int64 MaxChunks int - // queue chan *Item - chunks map[string][]byte - lock sync.RWMutex - // toc map[string]error - // tocLock sync.RWMutex - // stack *Stack + chunks map[string][]byte + stack []string + lock sync.RWMutex } // Item represents a chunk in RAM @@ -36,14 +34,10 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { ChunkPath: chunkPath, ChunkSize: chunkSize, MaxChunks: maxChunks, - // queue: make(chan *Item, 100), - chunks: make(map[string][]byte), - // toc: make(map[string]error), - // stack: NewStack(), + chunks: make(map[string][]byte), + stack: make([]string, maxChunks), } - // go storage.thread() - return &storage } @@ -70,6 +64,15 @@ func (s *Storage) LoadOrCreate(id string) ([]byte, bool) { // Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() + s.stack = append(s.stack, id) + if len(s.stack) > s.MaxChunks { + deleteID := s.stack[0] + if "" != deleteID { + s.stack = s.stack[1:] + Log.Debugf("Deleting chunk %v", deleteID) + delete(s.chunks, deleteID) + } + } s.chunks[id] = bytes s.lock.Unlock() @@ -82,98 +85,3 @@ func (s *Storage) Error(id string) { delete(s.chunks, id) s.lock.Unlock() } - -// func (s *Storage) thread() { -// for { -// item := <-s.queue -// if err := s.storeToDisk(item.id, item.bytes); nil != err { -// Log.Warningf("%v", err) -// } -// } -// } - -// func (s *Storage) deleteFromToc(id string) { -// s.tocLock.Lock() -// delete(s.toc, id) -// s.tocLock.Unlock() -// } - -// func (s *Storage) loadFromRAM(id string, offset, size int64) ([]byte, bool) { -// s.chunksLock.RLock() -// bytes, exists := s.chunks[id] -// s.chunksLock.RUnlock() -// if !exists { -// return nil, false -// } - -// sOffset := int64(math.Min(float64(len(bytes)), float64(offset))) -// eOffset := int64(math.Min(float64(len(bytes)), float64(offset+size))) -// return bytes[sOffset:eOffset], true -// } - -// func (s *Storage) loadFromDisk(id string, offset, size int64) ([]byte, bool) { -// filename := filepath.Join(s.ChunkPath, id) - -// f, err := os.Open(filename) -// if nil != err { -// Log.Tracef("%v", err) -// return nil, false -// } -// defer f.Close() - -// buf := make([]byte, size) -// n, err := f.ReadAt(buf, offset) -// if n > 0 && (nil == err || io.EOF == err || io.ErrUnexpectedEOF == err) { -// s.stack.Touch(id) - -// eOffset := int64(math.Min(float64(size), float64(n))) -// 
return buf[:eOffset], true -// } - -// Log.Tracef("%v", err) -// return nil, false -// } - -// func (s *Storage) storeToDisk(id string, bytes []byte) error { -// filename := filepath.Join(s.ChunkPath, id) - -// if s.stack.Len() >= s.MaxChunks { -// deleteID := s.stack.Pop() - -// if "" != deleteID { -// filename := filepath.Join(s.ChunkPath, deleteID) - -// Log.Debugf("Deleting chunk %v", filename) -// if err := os.Remove(filename); nil != err { -// Log.Debugf("%v", err) -// Log.Warningf("Could not delete chunk %v", filename) -// } - -// s.tocLock.Lock() -// delete(s.toc, deleteID) -// s.tocLock.Unlock() -// } -// } - -// if _, err := os.Stat(s.ChunkPath); os.IsNotExist(err) { -// if err := os.MkdirAll(s.ChunkPath, 0777); nil != err { -// Log.Debugf("%v", err) -// return fmt.Errorf("Could not create chunk temp path %v", s.ChunkPath) -// } -// } - -// if _, err := os.Stat(filename); os.IsNotExist(err) { -// if err := ioutil.WriteFile(filename, bytes, 0777); nil != err { -// Log.Debugf("%v", err) -// return fmt.Errorf("Could not write chunk temp file %v", filename) -// } - -// s.stack.Push(id) -// } - -// s.chunksLock.Lock() -// delete(s.chunks, id) -// s.chunksLock.Unlock() - -// return nil -// } From 096a1d8f1ec05bd3d191ee7486f5c7ba954c8508 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Wed, 12 Jul 2017 22:35:19 +0200 Subject: [PATCH 30/46] added chunk removal --- chunk/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chunk/storage.go b/chunk/storage.go index 4d79000..beba3d8 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -64,7 +64,6 @@ func (s *Storage) LoadOrCreate(id string) ([]byte, bool) { // Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - s.stack = append(s.stack, id) if len(s.stack) > s.MaxChunks { deleteID := s.stack[0] if "" != deleteID { @@ -74,6 +73,7 @@ func (s *Storage) Store(id string, bytes []byte) error { } } s.chunks[id] = bytes + s.stack = append(s.stack, id) s.lock.Unlock() return nil From 135992db2b6479f87e02289e7eb6b82add0a6312 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 13 Jul 2017 07:47:56 +0200 Subject: [PATCH 31/46] chunk removal --- chunk/storage.go | 21 ++++++++++++++------- main.go | 2 +- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/chunk/storage.go b/chunk/storage.go index beba3d8..0119937 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -19,6 +19,7 @@ type Storage struct { MaxChunks int chunks map[string][]byte stack []string + stackSize int lock sync.RWMutex } @@ -35,7 +36,7 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { ChunkSize: chunkSize, MaxChunks: maxChunks, chunks: make(map[string][]byte), - stack: make([]string, maxChunks), + stack: make([]string, 0), } return &storage @@ -64,16 +65,22 @@ func (s *Storage) LoadOrCreate(id string) ([]byte, bool) { // Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - if len(s.stack) > s.MaxChunks { + + // delete chunk + for s.stackSize > s.MaxChunks { + Log.Debugf("%v / %v", s.stackSize, s.MaxChunks) + deleteID := s.stack[0] - if "" != deleteID { - s.stack = s.stack[1:] - Log.Debugf("Deleting chunk %v", deleteID) - delete(s.chunks, deleteID) - } + s.stack = s.stack[1:] + s.stackSize-- + + Log.Debugf("Deleting chunk %v", deleteID) + delete(s.chunks, deleteID) } + s.chunks[id] = bytes s.stack = append(s.stack, id) + 
s.stackSize++ s.lock.Unlock() return nil diff --git a/main.go b/main.go index 07e9671..586aff0 100644 --- a/main.go +++ b/main.go @@ -45,7 +45,7 @@ func main() { argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 10*time.Second, "Duration to wait for a chunk to be loaded") argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") - argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") + argMaxChunks := flag.Int("max-chunks", 200, "The maximum number of chunks to be stored on disk") argRefreshInterval := flag.Duration("refresh-interval", 5*time.Minute, "The time to wait till checking for changes") argMountOptions := flag.StringP("fuse-options", "o", "", "Fuse mount options (e.g. -fuse-options allow_other,...)") argVersion := flag.Bool("version", false, "Displays program's version information") From 9419eee862973a446041a78f86712cb2780915b1 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 13 Jul 2017 09:19:30 +0200 Subject: [PATCH 32/46] error bugfix --- chunk/manager.go | 1 + 1 file changed, 1 insertion(+) diff --git a/chunk/manager.go b/chunk/manager.go index 34b36ec..4d197ad 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -166,6 +166,7 @@ func (m *Manager) checkChunk(req *Request, threadID int) { req.response <- Response{err: err} close(req.response) } + return } if nil != req.response { From 5e0e467899f86a2df76349d987f37c5dce900bfd Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Sat, 22 Jul 2017 21:19:13 +0200 Subject: [PATCH 33/46] tryouts --- chunk/download.go | 44 +++++++++++++-- chunk/manager.go | 136 +++++++++++++++++----------------------------- chunk/storage.go | 43 +++++---------- mount/mount.go | 20 ++++--- 4 files changed, 117 insertions(+), 126 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index 3e79ed9..fff52a2 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "net/http" "strings" + "sync" "time" . 
"github.com/claudetech/loggo/default" @@ -13,21 +14,56 @@ import ( // Downloader handles concurrent chunk downloads type Downloader struct { - Client *drive.Client + Client *drive.Client + queue chan *Request + callbacks map[string][]ResponseFunc + lock sync.RWMutex } // NewDownloader creates a new download manager func NewDownloader(threads int, client *drive.Client) (*Downloader, error) { manager := Downloader{ - Client: client, + Client: client, + queue: make(chan *Request, 100), + callbacks: make(map[string][]ResponseFunc, 100), + } + + for i := 0; i < threads; i++ { + Log.Debugf("Starting download thread %v", i) + go manager.thread(i) } return &manager, nil } // Download starts a new download request -func (d *Downloader) Download(req *Request) ([]byte, error) { - return downloadFromAPI(d.Client.GetNativeClient(), req, 0) +func (d *Downloader) Download(req *Request, callback ResponseFunc) { + d.lock.Lock() + _, exists := d.callbacks[req.id] + d.callbacks[req.id] = append(d.callbacks[req.id], callback) + if !exists { + d.queue <- req + } + d.lock.Unlock() +} + +func (d *Downloader) thread(threadID int) { + for { + req := <-d.queue + d.lock.RLock() + callbacks := d.callbacks[req.id] + d.lock.RUnlock() + d.download(d.Client.GetNativeClient(), req, callbacks) + } +} + +func (d *Downloader) download(client *http.Client, req *Request, callbacks []ResponseFunc) { + Log.Debugf("Starting download %v", req.id) + bytes, err := downloadFromAPI(client, req, 0) + + for _, callback := range callbacks { + callback(err, bytes) + } } func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte, error) { diff --git a/chunk/manager.go b/chunk/manager.go index 4d197ad..6b8c3fd 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -2,7 +2,6 @@ package chunk import ( "fmt" - "math" . 
"github.com/claudetech/loggo/default" @@ -19,26 +18,21 @@ type Manager struct { Timeout time.Duration TimeoutRetries int downloader *Downloader - queue chan *Request - queue2 chan *Request storage *Storage + preloadQueue chan *Request } // Request represents a chunk request type Request struct { - id string - object *drive.APIObject - preload bool - offsetStart int64 - offsetEnd int64 - response chan Response + id string + object *drive.APIObject + offsetStart int64 + offsetEnd int64 + chunkOffset int64 + chunkOffsetEnd int64 } -// Response represents a chunk responses -type Response struct { - err error - bytes []byte -} +type ResponseFunc func(error, []byte) // NewManager creates a new chunk manager func NewManager( @@ -76,8 +70,6 @@ func NewManager( Timeout: timeout, TimeoutRetries: timeoutRetries, downloader: downloader, - queue: make(chan *Request, 20), - queue2: make(chan *Request, 80), storage: NewStorage(chunkPath, chunkSize, maxChunks), } @@ -85,96 +77,68 @@ func NewManager( return nil, err } - for i := 0; i < threads; i++ { - go manager.thread(i) - } + go manager.thread() return &manager, nil } // GetChunk loads one chunk and starts the preload for the next chunks -func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, error) { +func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64, callback ResponseFunc) { chunkOffset := offset % m.ChunkSize offsetStart := offset - chunkOffset offsetEnd := offsetStart + m.ChunkSize id := fmt.Sprintf("%v:%v", object.ObjectID, offsetStart) - response := make(chan Response) - m.queue <- &Request{ - id: id, - object: object, - offsetStart: offsetStart, - offsetEnd: offsetEnd, - preload: false, - response: response, - } - - for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { - aheadOffsetStart := offsetStart + i - aheadOffsetEnd := aheadOffsetStart + m.ChunkSize - if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { - id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) - m.queue2 <- &Request{ - id: id, - object: object, - offsetStart: aheadOffsetStart, - offsetEnd: aheadOffsetEnd, - preload: true, - } - } - } - - res := <-response - if nil != res.err { - return nil, res.err - } - - sOffset := int64(math.Min(float64(len(res.bytes)), float64(chunkOffset))) - eOffset := int64(math.Min(float64(len(res.bytes)), float64(chunkOffset+size))) - return res.bytes[sOffset:eOffset], nil + request := &Request{ + id: id, + object: object, + offsetStart: offsetStart, + offsetEnd: offsetEnd, + chunkOffset: chunkOffset, + chunkOffsetEnd: chunkOffset + size, + } + + m.checkChunk(request, callback) + + // for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { + // aheadOffsetStart := offsetStart + i + // aheadOffsetEnd := aheadOffsetStart + m.ChunkSize + // if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { + // id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) + // m.preloadQueue <- &Request{ + // id: id, + // object: object, + // offsetStart: aheadOffsetStart, + // offsetEnd: aheadOffsetEnd, + // } + // } + // } } -func (m *Manager) thread(threadID int) { +func (m *Manager) thread() { for { - select { - case req := <-m.queue: - m.checkChunk(req, threadID) - case req := <-m.queue2: - m.checkChunk(req, threadID) - } + req := <-m.preloadQueue + m.checkChunk(req, nil) } } -func (m *Manager) checkChunk(req *Request, threadID int) { - if chunk, exists := m.storage.LoadOrCreate(req.id); 
exists { - if nil != req.response { - req.response <- Response{bytes: chunk} - close(req.response) +func (m *Manager) checkChunk(req *Request, callback ResponseFunc) { + if chunk := m.storage.Load(req.id); nil != chunk { + if nil != callback { + callback(nil, chunk[req.chunkOffset:req.chunkOffsetEnd]) } return } - Log.Debugf("Requesting object %v (%v) bytes %v - %v from API (preload: %v | thread: %v)", - req.object.ObjectID, req.object.Name, req.offsetStart, req.offsetEnd, req.preload, threadID) - - bytes, err := m.downloader.Download(req) - if nil != err { - Log.Warningf("%v", err) - m.storage.Error(req.id) - - if nil != req.response { - req.response <- Response{err: err} - close(req.response) + m.downloader.Download(req, func(err error, bytes []byte) { + if nil != callback { + callback(err, bytes[req.chunkOffset:req.chunkOffsetEnd]) } - return - } - if nil != req.response { - req.response <- Response{bytes: bytes} - close(req.response) - } - - if err := m.storage.Store(req.id, bytes); nil != err { - Log.Warningf("%v", err) - } + if nil != err { + if err := m.storage.Store(req.id, bytes); nil != err { + Log.Warningf("Could not store chunk %v", req.id) + } + } + }) } diff --git a/chunk/storage.go b/chunk/storage.go index 0119937..dc465af 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -5,8 +5,6 @@ import ( "fmt" "os" "sync" - - . "github.com/claudetech/loggo/default" ) // ErrTimeout is a timeout error @@ -18,9 +16,7 @@ type Storage struct { ChunkSize int64 MaxChunks int chunks map[string][]byte - stack []string - stackSize int - lock sync.RWMutex + lock sync.Mutex } // Item represents a chunk in RAM @@ -36,7 +32,6 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { ChunkSize: chunkSize, MaxChunks: maxChunks, chunks: make(map[string][]byte), - stack: make([]string, 0), } return &storage @@ -50,45 +45,35 @@ func (s *Storage) Clear() error { return nil } -// LoadOrCreate loads a chunk from ram or creates it -func (s *Storage) LoadOrCreate(id string) ([]byte, bool) { +// Load a chunk from ram or creates it +func (s *Storage) Load(id string) []byte { s.lock.Lock() if chunk, exists := s.chunks[id]; exists { s.lock.Unlock() - return chunk, true + return chunk } - s.chunks[id] = nil s.lock.Unlock() - return nil, false + return nil } // Store stores a chunk in the RAM and adds it to the disk storage queue func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - // delete chunk - for s.stackSize > s.MaxChunks { - Log.Debugf("%v / %v", s.stackSize, s.MaxChunks) + // // delete chunk + // for s.stackSize > s.MaxChunks { + // Log.Debugf("%v / %v", s.stackSize, s.MaxChunks) - deleteID := s.stack[0] - s.stack = s.stack[1:] - s.stackSize-- + // deleteID := s.stack[0] + // s.stack = s.stack[1:] + // s.stackSize-- - Log.Debugf("Deleting chunk %v", deleteID) - delete(s.chunks, deleteID) - } + // Log.Debugf("Deleting chunk %v", deleteID) + // delete(s.chunks, deleteID) + // } s.chunks[id] = bytes - s.stack = append(s.stack, id) - s.stackSize++ s.lock.Unlock() return nil } - -// Error is called to remove an item from the index if there has been an issue downloading the chunk -func (s *Storage) Error(id string) { - s.lock.Lock() - delete(s.chunks, id) - s.lock.Unlock() -} diff --git a/mount/mount.go b/mount/mount.go index c7df2df..1b680fd 100644 --- a/mount/mount.go +++ b/mount/mount.go @@ -232,14 +232,20 @@ func (o *Object) Lookup(ctx context.Context, name string) (fs.Node, error) { // Read reads some bytes or the whole file func (o *Object) Read(ctx 
context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - bytes, err := o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size)) - if nil != err { - Log.Warningf("%v", err) - return fuse.EIO - } + var e error + wait := make(chan bool) + o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size), func(err error, bytes []byte) { + if nil != err { + Log.Warningf("%v", err) + e = err + } + resp.Data = bytes + wait <- true + close(wait) + }) - resp.Data = bytes[:] - return nil + <-wait + return e } // Remove deletes an element From f6d7d9d1c0698466968d69018f95707b5873ddf8 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Sun, 23 Jul 2017 00:45:12 +0200 Subject: [PATCH 34/46] tryouts --- chunk/download.go | 31 +++++++++------- chunk/manager.go | 95 ++++++++++++++++++++++++++++++++--------------- chunk/stack.go | 33 ++++++++-------- chunk/storage.go | 22 ++++++----- main.go | 4 +- mount/mount.go | 23 +++++------- 6 files changed, 123 insertions(+), 85 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index fff52a2..0cd1d47 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -16,28 +16,30 @@ import ( type Downloader struct { Client *drive.Client queue chan *Request - callbacks map[string][]ResponseFunc - lock sync.RWMutex + callbacks map[string][]DownloadCallback + lock sync.Mutex } +type DownloadCallback func(error, []byte) + // NewDownloader creates a new download manager func NewDownloader(threads int, client *drive.Client) (*Downloader, error) { manager := Downloader{ Client: client, queue: make(chan *Request, 100), - callbacks: make(map[string][]ResponseFunc, 100), + callbacks: make(map[string][]DownloadCallback, 100), } - for i := 0; i < threads; i++ { - Log.Debugf("Starting download thread %v", i) - go manager.thread(i) - } + // for i := 0; i < threads/2; i++ { + // Log.Debugf("Starting download thread %v", i) + go manager.thread(1) + // } return &manager, nil } // Download starts a new download request -func (d *Downloader) Download(req *Request, callback ResponseFunc) { +func (d *Downloader) Download(req *Request, callback DownloadCallback) { d.lock.Lock() _, exists := d.callbacks[req.id] d.callbacks[req.id] = append(d.callbacks[req.id], callback) @@ -50,20 +52,21 @@ func (d *Downloader) Download(req *Request, callback ResponseFunc) { func (d *Downloader) thread(threadID int) { for { req := <-d.queue - d.lock.RLock() - callbacks := d.callbacks[req.id] - d.lock.RUnlock() - d.download(d.Client.GetNativeClient(), req, callbacks) + d.download(d.Client.GetNativeClient(), req) } } -func (d *Downloader) download(client *http.Client, req *Request, callbacks []ResponseFunc) { - Log.Debugf("Starting download %v", req.id) +func (d *Downloader) download(client *http.Client, req *Request) { + Log.Debugf("Starting download %v (preload: %v)", req.id, req.preload) bytes, err := downloadFromAPI(client, req, 0) + d.lock.Lock() + callbacks := d.callbacks[req.id] for _, callback := range callbacks { callback(err, bytes) } + delete(d.callbacks, req.id) + d.lock.Unlock() } func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte, error) { diff --git a/chunk/manager.go b/chunk/manager.go index 6b8c3fd..5da7098 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -19,7 +19,12 @@ type Manager struct { TimeoutRetries int downloader *Downloader storage *Storage - preloadQueue chan *Request + queue chan *QueueEntry +} + +type QueueEntry struct { + request *Request + response chan Response } // Request represents a chunk request @@ -30,9 
+35,14 @@ type Request struct { offsetEnd int64 chunkOffset int64 chunkOffsetEnd int64 + preload bool } -type ResponseFunc func(error, []byte) +// Response represents a chunk response +type Response struct { + Error error + Bytes []byte +} // NewManager creates a new chunk manager func NewManager( @@ -71,19 +81,22 @@ func NewManager( TimeoutRetries: timeoutRetries, downloader: downloader, storage: NewStorage(chunkPath, chunkSize, maxChunks), + queue: make(chan *QueueEntry, 100), } if err := manager.storage.Clear(); nil != err { return nil, err } - go manager.thread() + for i := 0; i < threads; i++ { + go manager.thread(i) + } return &manager, nil } // GetChunk loads one chunk and starts the preload for the next chunks -func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64, callback ResponseFunc) { +func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64, response chan Response) { chunkOffset := offset % m.ChunkSize offsetStart := offset - chunkOffset offsetEnd := offsetStart + m.ChunkSize @@ -96,49 +109,71 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64, callback offsetEnd: offsetEnd, chunkOffset: chunkOffset, chunkOffsetEnd: chunkOffset + size, + preload: false, } - m.checkChunk(request, callback) - - // for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { - // aheadOffsetStart := offsetStart + i - // aheadOffsetEnd := aheadOffsetStart + m.ChunkSize - // if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { - // id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) - // m.preloadQueue <- &Request{ - // id: id, - // object: object, - // offsetStart: aheadOffsetStart, - // offsetEnd: aheadOffsetEnd, - // } - // } - // } + m.queue <- &QueueEntry{ + request: request, + response: response, + } + + for i := m.ChunkSize; i < (m.ChunkSize * int64(m.LoadAhead+1)); i += m.ChunkSize { + aheadOffsetStart := offsetStart + i + aheadOffsetEnd := aheadOffsetStart + m.ChunkSize + if uint64(aheadOffsetStart) < object.Size && uint64(aheadOffsetEnd) < object.Size { + id := fmt.Sprintf("%v:%v", object.ObjectID, aheadOffsetStart) + request := &Request{ + id: id, + object: object, + offsetStart: aheadOffsetStart, + offsetEnd: aheadOffsetEnd, + preload: true, + } + m.queue <- &QueueEntry{ + request: request, + } + } + } } -func (m *Manager) thread() { +func (m *Manager) thread(threadID int) { for { - req := <-m.preloadQueue - m.checkChunk(req, nil) + queueEntry := <-m.queue + m.checkChunk(queueEntry.request, queueEntry.response) } } -func (m *Manager) checkChunk(req *Request, callback ResponseFunc) { - if chunk := m.storage.Load(req.id); nil != chunk { - if nil != callback { - callback(nil, chunk[req.chunkOffset:req.chunkOffsetEnd]) +func (m *Manager) checkChunk(req *Request, response chan Response) { + if chunk := m.storage.Load(req.id); nil != chunk { + if nil != response { + response <- Response{ + Bytes: chunk[req.chunkOffset:req.chunkOffsetEnd], + } + close(response) + } return } m.downloader.Download(req, func(err error, bytes []byte) { - if nil != callback { - callback(err, bytes[req.chunkOffset:req.chunkOffsetEnd]) + if nil != err { + if nil != response { + response <- Response{ + Error: err, + } + close(response) + } + return } - if nil != err { - if err := m.storage.Store(req.id, bytes); nil != err { - Log.Warningf("Could not store chunk %v", req.id) + if nil != response { + response <- Response{ + Bytes: bytes[req.chunkOffset:req.chunkOffsetEnd], } + close(response) + } + + if err := m.storage.Store(req.id, bytes); nil != err { +
Log.Warningf("Coult not store chunk %v", req.id) } }) } diff --git a/chunk/stack.go b/chunk/stack.go index ffda4ee..9c9564a 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ -8,22 +8,22 @@ import ( // Stack is a thread safe list/stack implementation type Stack struct { items *list.List + index map[string]*list.Element + len int lock sync.RWMutex } // NewStack creates a new stack -func NewStack() *Stack { +func NewStack(maxChunks int) *Stack { return &Stack{ items: list.New(), + index: make(map[string]*list.Element, maxChunks), } } // Len returns the length of the current stack func (s *Stack) Len() int { - s.lock.RLock() - count := s.items.Len() - s.lock.RUnlock() - return count + return s.len } // Pop pops the first item from the stack @@ -35,19 +35,20 @@ func (s *Stack) Pop() string { return "" } s.items.Remove(item) + s.len-- + id := item.Value.(string) + delete(s.index, id) s.lock.Unlock() - return item.Value.(string) + return id } // Touch moves the specified item to the last position of the stack func (s *Stack) Touch(id string) { s.lock.Lock() - for item := s.items.Front(); item != nil; item = item.Next() { - if item.Value.(string) == id { - s.items.MoveToBack(item) - break - } + item, exists := s.index[id] + if exists { + s.items.MoveToBack(item) } s.lock.Unlock() } @@ -55,12 +56,12 @@ func (s *Stack) Touch(id string) { // Push adds a new item to the last position of the stack func (s *Stack) Push(id string) { s.lock.Lock() - for item := s.items.Front(); item != nil; item = item.Next() { - if item.Value.(string) == id { - s.lock.Unlock() - return - } + if _, exists := s.index[id]; exists { + s.lock.Unlock() + return } s.items.PushBack(id) + s.index[id] = s.items.Back() + s.len++ s.lock.Unlock() } diff --git a/chunk/storage.go b/chunk/storage.go index dc465af..6d545c8 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -5,6 +5,8 @@ import ( "fmt" "os" "sync" + + . 
"github.com/claudetech/loggo/default" ) // ErrTimeout is a timeout error @@ -16,6 +18,7 @@ type Storage struct { ChunkSize int64 MaxChunks int chunks map[string][]byte + stack *Stack lock sync.Mutex } @@ -32,6 +35,7 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { ChunkSize: chunkSize, MaxChunks: maxChunks, chunks: make(map[string][]byte), + stack: NewStack(maxChunks), } return &storage @@ -50,6 +54,7 @@ func (s *Storage) Load(id string) []byte { s.lock.Lock() if chunk, exists := s.chunks[id]; exists { s.lock.Unlock() + s.stack.Touch(id) return chunk } s.lock.Unlock() @@ -60,19 +65,16 @@ func (s *Storage) Load(id string) []byte { func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - // // delete chunk - // for s.stackSize > s.MaxChunks { - // Log.Debugf("%v / %v", s.stackSize, s.MaxChunks) - - // deleteID := s.stack[0] - // s.stack = s.stack[1:] - // s.stackSize-- + // delete oldest chunk + if s.stack.Len() >= s.MaxChunks-1 { + deleteID := s.stack.Pop() + delete(s.chunks, deleteID) - // Log.Debugf("Deleting chunk %v", deleteID) - // delete(s.chunks, deleteID) - // } + Log.Debugf("Deleted chunk %v", deleteID) + } s.chunks[id] = bytes + s.stack.Push(id) s.lock.Unlock() return nil diff --git a/main.go b/main.go index 586aff0..d71fb47 100644 --- a/main.go +++ b/main.go @@ -41,11 +41,11 @@ func main() { argTempPath := flag.StringP("temp", "t", os.TempDir(), "Path to a temporary directory to store temporary data") argCacheFile := flag.String("cache-file", filepath.Join(user.HomeDir, ".plexdrive", "cache.bolt"), "Path the the cache file") argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") - argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU(), "The number of threads to use for downloading chunks") + argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU()-1, "The number of threads to use for downloading chunks") argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 10*time.Second, "Duration to wait for a chunk to be loaded") argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") - argMaxChunks := flag.Int("max-chunks", 200, "The maximum number of chunks to be stored on disk") + argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") argRefreshInterval := flag.Duration("refresh-interval", 5*time.Minute, "The time to wait till checking for changes") argMountOptions := flag.StringP("fuse-options", "o", "", "Fuse mount options (e.g. 
-fuse-options allow_other,...)") argVersion := flag.Bool("version", false, "Displays program's version information") diff --git a/mount/mount.go b/mount/mount.go index 1b680fd..7ed21a3 100644 --- a/mount/mount.go +++ b/mount/mount.go @@ -232,20 +232,17 @@ func (o *Object) Lookup(ctx context.Context, name string) (fs.Node, error) { // Read reads some bytes or the whole file func (o *Object) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - var e error - wait := make(chan bool) - o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size), func(err error, bytes []byte) { - if nil != err { - Log.Warningf("%v", err) - e = err - } - resp.Data = bytes - wait <- true - close(wait) - }) + response := make(chan chunk.Response) + go o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size), response) + res := <-response - <-wait - return e + if nil != res.Error { + Log.Warningf("%v", res.Error) + return fuse.EIO + } + + resp.Data = res.Bytes + return nil } // Remove deletes an element From 46dcc370899ab9b5c4ffe1f5f0b71914a0e6815b Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Sun, 23 Jul 2017 02:17:34 +0200 Subject: [PATCH 35/46] working stuff --- chunk/download.go | 9 ++++----- chunk/manager.go | 11 ++++++----- chunk/storage.go | 3 +-- main.go | 9 ++++++--- mount/mount.go | 2 +- 5 files changed, 18 insertions(+), 16 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index 0cd1d47..901473d 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -30,10 +30,9 @@ func NewDownloader(threads int, client *drive.Client) (*Downloader, error) { callbacks: make(map[string][]DownloadCallback, 100), } - // for i := 0; i < threads/2; i++ { - // Log.Debugf("Starting download thread %v", i) - go manager.thread(1) - // } + for i := 0; i < threads; i++ { + go manager.thread() + } return &manager, nil } @@ -49,7 +48,7 @@ func (d *Downloader) Download(req *Request, callback DownloadCallback) { d.lock.Unlock() } -func (d *Downloader) thread(threadID int) { +func (d *Downloader) thread() { for { req := <-d.queue d.download(d.Client.GetNativeClient(), req) diff --git a/chunk/manager.go b/chunk/manager.go index 5da7098..6e93abb 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -49,7 +49,8 @@ func NewManager( chunkPath string, chunkSize int64, loadAhead, - threads int, + checkThreads int, + loadThreads int, client *drive.Client, maxChunks int, timeout time.Duration, @@ -68,7 +69,7 @@ func NewManager( return nil, fmt.Errorf("max-chunk must be greater than 2 and bigger than the load ahead value") } - downloader, err := NewDownloader(threads, client) + downloader, err := NewDownloader(loadThreads, client) if nil != err { return nil, err } @@ -88,8 +89,8 @@ func NewManager( return nil, err } - for i := 0; i < threads; i++ { - go manager.thread(i) + for i := 0; i < checkThreads; i++ { + go manager.thread() } return &manager, nil @@ -136,7 +137,7 @@ func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64, response } } -func (m *Manager) thread(threadID int) { +func (m *Manager) thread() { for { queueEntry := <-m.queue m.checkChunk(queueEntry.request, queueEntry.response) diff --git a/chunk/storage.go b/chunk/storage.go index 6d545c8..52e9661 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -65,8 +65,7 @@ func (s *Storage) Load(id string) []byte { func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - // delete oldest chunk - if s.stack.Len() >= s.MaxChunks-1 { + for s.stack.Len() >= s.MaxChunks-1 { deleteID := 
s.stack.Pop() delete(s.chunks, deleteID) diff --git a/main.go b/main.go index d71fb47..b55c08f 100644 --- a/main.go +++ b/main.go @@ -40,9 +40,10 @@ func main() { argConfigPath := flag.StringP("config", "c", filepath.Join(user.HomeDir, ".plexdrive"), "The path to the configuration directory") argTempPath := flag.StringP("temp", "t", os.TempDir(), "Path to a temporary directory to store temporary data") argCacheFile := flag.String("cache-file", filepath.Join(user.HomeDir, ".plexdrive", "cache.bolt"), "Path the the cache file") - argChunkSize := flag.String("chunk-size", "5M", "The size of each chunk that is downloaded (units: B, K, M, G)") - argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU()-1, "The number of threads to use for downloading chunks") - argChunkLoadAhead := flag.Int("chunk-load-ahead", 2, "The number of chunks that should be read ahead") + argChunkSize := flag.String("chunk-size", "10M", "The size of each chunk that is downloaded (units: B, K, M, G)") + argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU()/2, "The number of threads to use for downloading chunks") + argChunkCheckThreads := flag.Int("chunk-check-threads", runtime.NumCPU()/2, "The number of threads to use for checking chunk existence") + argChunkLoadAhead := flag.Int("chunk-load-ahead", 1, "The number of chunks that should be read ahead") argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 10*time.Second, "Duration to wait for a chunk to be loaded") argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") @@ -114,6 +115,7 @@ func main() { Log.Debugf("cache-file : %v", *argCacheFile) Log.Debugf("chunk-size : %v", *argChunkSize) Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) + Log.Debugf("chunk-check-threads : %v", *argChunkCheckThreads) Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) Log.Debugf("chunk-load-timeout : %v", *argChunkLoadTimeout) Log.Debugf("chunk-load-retries : %v", *argChunkLoadRetries) @@ -175,6 +177,7 @@ func main() { chunkPath, chunkSize, *argChunkLoadAhead, + *argChunkCheckThreads, *argChunkLoadThreads, client, *argMaxChunks, diff --git a/mount/mount.go b/mount/mount.go index 7ed21a3..0d1237a 100644 --- a/mount/mount.go +++ b/mount/mount.go @@ -233,7 +233,7 @@ func (o *Object) Lookup(ctx context.Context, name string) (fs.Node, error) { // Read reads some bytes or the whole file func (o *Object) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { response := make(chan chunk.Response) - go o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size), response) + o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size), response) res := <-response if nil != res.Error { From a8333020497b4fc5caa5ca57d1d76fba039cdbb9 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Sun, 23 Jul 2017 10:01:29 +0200 Subject: [PATCH 36/46] chunk cutting / 416 error reporting --- chunk/download.go | 2 +- chunk/manager.go | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/chunk/download.go b/chunk/download.go index 901473d..5618ee9 100644 --- a/chunk/download.go +++ b/chunk/download.go @@ -96,7 +96,7 @@ func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte if res.StatusCode != 403 && res.StatusCode != 500 { Log.Debugf("Request\n----------\n%v\n----------\n", req) Log.Debugf("Response\n----------\n%v\n----------\n", res) - 
return nil, fmt.Errorf("Wrong status code %v", res.StatusCode) + return nil, fmt.Errorf("Wrong status code %v for %v", res.StatusCode, request.object) } // throttle requests diff --git a/chunk/manager.go b/chunk/manager.go index 6e93abb..ddbcc48 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -7,6 +7,8 @@ import ( "time" + "math" + "github.com/dweidenfeld/plexdrive/drive" ) @@ -145,10 +147,10 @@ func (m *Manager) thread() { } func (m *Manager) checkChunk(req *Request, response chan Response) { - if chunk := m.storage.Load(req.id); nil != chunk { + if bytes := m.storage.Load(req.id); nil != bytes { if nil != response { response <- Response{ - Bytes: chunk[req.chunkOffset:req.chunkOffsetEnd], + Bytes: adjustResponseChunk(req, bytes), } close(response) } @@ -167,8 +169,9 @@ func (m *Manager) checkChunk(req *Request, response chan Response) { } if nil != response { + response <- Response{ - Bytes: bytes[req.chunkOffset:req.chunkOffsetEnd], + Bytes: adjustResponseChunk(req, bytes), } close(response) } @@ -178,3 +181,10 @@ func (m *Manager) checkChunk(req *Request, response chan Response) { } }) } + +func adjustResponseChunk(req *Request, bytes []byte) []byte { + sOffset := int64(math.Min(float64(req.chunkOffset), float64(len(bytes)))) + eOffset := int64(math.Min(float64(req.chunkOffsetEnd), float64(len(bytes)))) + + return bytes[sOffset:eOffset] +} From 1f0a8d1e3278dd7bb6df39ccc009e5320dc0c3c0 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Mon, 24 Jul 2017 21:30:05 +0200 Subject: [PATCH 37/46] length check race condition --- chunk/stack.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/chunk/stack.go b/chunk/stack.go index 9c9564a..cd2a7bc 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ -23,7 +23,10 @@ func NewStack(maxChunks int) *Stack { // Len returns the length of the current stack func (s *Stack) Len() int { - return s.len + s.lock.RLock() + len := s.len + s.lock.RUnlock() + return len } // Pop pops the first item from the stack From 3e7bb53dd732af4613bbbf12c2f0dc3a1612eba2 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 27 Jul 2017 19:42:46 +0200 Subject: [PATCH 38/46] faster stack reading --- chunk/stack.go | 33 ++++++++++++++++++++------------- chunk/storage.go | 4 ++-- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/chunk/stack.go b/chunk/stack.go index cd2a7bc..5329a26 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ -7,31 +7,38 @@ import ( // Stack is a thread safe list/stack implementation type Stack struct { - items *list.List - index map[string]*list.Element - len int - lock sync.RWMutex + items *list.List + index map[string]*list.Element + len int + lock sync.RWMutex + maxSize int } // NewStack creates a new stack func NewStack(maxChunks int) *Stack { return &Stack{ - items: list.New(), - index: make(map[string]*list.Element, maxChunks), + items: list.New(), + index: make(map[string]*list.Element, maxChunks), + maxSize: maxChunks, } } -// Len returns the length of the current stack -func (s *Stack) Len() int { - s.lock.RLock() - len := s.len - s.lock.RUnlock() - return len -} +// // Len returns the length of the current stack +// func (s *Stack) Len() int { +// s.lock.RLock() +// len := s.len +// s.lock.RUnlock() +// return len +// } // Pop pops the first item from the stack func (s *Stack) Pop() string { s.lock.Lock() + if s.len < s.maxSize { + s.lock.Unlock() + return "" + } + item := s.items.Front() if nil == item { s.lock.Unlock() diff --git a/chunk/storage.go b/chunk/storage.go index 52e9661..790139d 
100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -65,8 +65,8 @@ func (s *Storage) Load(id string) []byte { func (s *Storage) Store(id string, bytes []byte) error { s.lock.Lock() - for s.stack.Len() >= s.MaxChunks-1 { - deleteID := s.stack.Pop() + deleteID := s.stack.Pop() + if "" != deleteID { delete(s.chunks, deleteID) Log.Debugf("Deleted chunk %v", deleteID) From 86c3f9c813e321658986184ff8112a6dd88c802d Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 27 Jul 2017 19:58:11 +0200 Subject: [PATCH 39/46] finalized parameters --- README.md | 18 ++++++++---------- chunk/manager.go | 36 +++++++++++------------------------- chunk/storage.go | 9 +-------- main.go | 16 +++------------- 4 files changed, 23 insertions(+), 56 deletions(-) diff --git a/README.md b/README.md index e919371..5febb6c 100644 --- a/README.md +++ b/README.md @@ -29,16 +29,16 @@ You can use [this tutorial](TUTORIAL.md) for instruction how to mount an encrypt ## Usage ``` Usage of ./plexdrive: + --cache-file string + Path the the cache file (default "~/.plexdrive/cache.bolt") + --chunk-check-threads int + The number of threads to use for checking chunk existence (default 2) --chunk-load-ahead int - The number of chunks that should be read ahead (default 2) - --chunk-load-retries int - Number of retries to load a chunk (default 3) + The number of chunks that should be read ahead (default 3) --chunk-load-threads int - The number of threads to use for downloading chunks (default 4) - --chunk-load-timeout duration - Duration to wait for a chunk to be loaded (default 10s) + The number of threads to use for downloading chunks (default 2) --chunk-size string - The size of each chunk that is downloaded (units: B, K, M, G) (default "5M") + The size of each chunk that is downloaded (units: B, K, M, G) (default "10M") -c, --config string The path to the configuration directory (default "~/.plexdrive") -o, --fuse-options string @@ -48,11 +48,9 @@ Usage of ./plexdrive: --max-chunks int The maximum number of chunks to be stored on disk (default 10) --refresh-interval duration - The time to wait till checking for changes (default 5m0s) + The time to wait till checking for changes (default 1m0s) --root-node-id string The ID of the root node to mount (use this for only mount a sub directory) (default "root") - -t, --temp string - Path to a temporary directory to store temporary data (default "/tmp") --uid int Set the mounts UID (-1 = default permissions) (default -1) --umask value diff --git a/chunk/manager.go b/chunk/manager.go index ddbcc48..73a94a9 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -5,8 +5,6 @@ import ( . 
"github.com/claudetech/loggo/default" - "time" - "math" "github.com/dweidenfeld/plexdrive/drive" @@ -14,14 +12,11 @@ import ( // Manager manages chunks on disk type Manager struct { - ChunkPath string - ChunkSize int64 - LoadAhead int - Timeout time.Duration - TimeoutRetries int - downloader *Downloader - storage *Storage - queue chan *QueueEntry + ChunkSize int64 + LoadAhead int + downloader *Downloader + storage *Storage + queue chan *QueueEntry } type QueueEntry struct { @@ -48,19 +43,13 @@ type Response struct { // NewManager creates a new chunk manager func NewManager( - chunkPath string, chunkSize int64, loadAhead, checkThreads int, loadThreads int, client *drive.Client, - maxChunks int, - timeout time.Duration, - timeoutRetries int) (*Manager, error) { + maxChunks int) (*Manager, error) { - if "" == chunkPath { - return nil, fmt.Errorf("Path to chunk file must not be empty") - } if chunkSize < 4096 { return nil, fmt.Errorf("Chunk size must not be < 4096") } @@ -77,14 +66,11 @@ func NewManager( } manager := Manager{ - ChunkPath: chunkPath, - ChunkSize: chunkSize, - LoadAhead: loadAhead, - Timeout: timeout, - TimeoutRetries: timeoutRetries, - downloader: downloader, - storage: NewStorage(chunkPath, chunkSize, maxChunks), - queue: make(chan *QueueEntry, 100), + ChunkSize: chunkSize, + LoadAhead: loadAhead, + downloader: downloader, + storage: NewStorage(chunkSize, maxChunks), + queue: make(chan *QueueEntry, 100), } if err := manager.storage.Clear(); nil != err { diff --git a/chunk/storage.go b/chunk/storage.go index 790139d..3a81129 100644 --- a/chunk/storage.go +++ b/chunk/storage.go @@ -2,8 +2,6 @@ package chunk import ( "errors" - "fmt" - "os" "sync" . "github.com/claudetech/loggo/default" @@ -14,7 +12,6 @@ var ErrTimeout = errors.New("timeout") // Storage is a chunk storage type Storage struct { - ChunkPath string ChunkSize int64 MaxChunks int chunks map[string][]byte @@ -29,9 +26,8 @@ type Item struct { } // NewStorage creates a new storage -func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { +func NewStorage(chunkSize int64, maxChunks int) *Storage { storage := Storage{ - ChunkPath: chunkPath, ChunkSize: chunkSize, MaxChunks: maxChunks, chunks: make(map[string][]byte), @@ -43,9 +39,6 @@ func NewStorage(chunkPath string, chunkSize int64, maxChunks int) *Storage { // Clear removes all old chunks on disk (will be called on each program start) func (s *Storage) Clear() error { - if err := os.RemoveAll(s.ChunkPath); nil != err { - return fmt.Errorf("Could not clear old chunks from disk") - } return nil } diff --git a/main.go b/main.go index b55c08f..a2ce22f 100644 --- a/main.go +++ b/main.go @@ -38,16 +38,13 @@ func main() { argLogLevel := flag.IntP("verbosity", "v", 0, "Set the log level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace)") argRootNodeID := flag.String("root-node-id", "root", "The ID of the root node to mount (use this for only mount a sub directory)") argConfigPath := flag.StringP("config", "c", filepath.Join(user.HomeDir, ".plexdrive"), "The path to the configuration directory") - argTempPath := flag.StringP("temp", "t", os.TempDir(), "Path to a temporary directory to store temporary data") argCacheFile := flag.String("cache-file", filepath.Join(user.HomeDir, ".plexdrive", "cache.bolt"), "Path the the cache file") argChunkSize := flag.String("chunk-size", "10M", "The size of each chunk that is downloaded (units: B, K, M, G)") argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU()/2, "The number of threads to use for 
downloading chunks") argChunkCheckThreads := flag.Int("chunk-check-threads", runtime.NumCPU()/2, "The number of threads to use for checking chunk existence") - argChunkLoadAhead := flag.Int("chunk-load-ahead", 1, "The number of chunks that should be read ahead") - argChunkLoadTimeout := flag.Duration("chunk-load-timeout", 10*time.Second, "Duration to wait for a chunk to be loaded") - argChunkLoadRetries := flag.Int("chunk-load-retries", 3, "Number of retries to load a chunk") + argChunkLoadAhead := flag.Int("chunk-load-ahead", runtime.NumCPU()-1, "The number of chunks that should be read ahead") argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") - argRefreshInterval := flag.Duration("refresh-interval", 5*time.Minute, "The time to wait till checking for changes") + argRefreshInterval := flag.Duration("refresh-interval", 1*time.Minute, "The time to wait till checking for changes") argMountOptions := flag.StringP("fuse-options", "o", "", "Fuse mount options (e.g. -fuse-options allow_other,...)") argVersion := flag.Bool("version", false, "Displays program's version information") argUID := flag.Int64("uid", -1, "Set the mounts UID (-1 = default permissions)") @@ -111,14 +108,11 @@ func main() { Log.Debugf("verbosity : %v", logLevel) Log.Debugf("root-node-id : %v", *argRootNodeID) Log.Debugf("config : %v", *argConfigPath) - Log.Debugf("temp : %v", *argTempPath) Log.Debugf("cache-file : %v", *argCacheFile) Log.Debugf("chunk-size : %v", *argChunkSize) Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) Log.Debugf("chunk-check-threads : %v", *argChunkCheckThreads) Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) - Log.Debugf("chunk-load-timeout : %v", *argChunkLoadTimeout) - Log.Debugf("chunk-load-retries : %v", *argChunkLoadRetries) Log.Debugf("max-chunks : %v", *argMaxChunks) Log.Debugf("refresh-interval : %v", *argRefreshInterval) Log.Debugf("fuse-options : %v", *argMountOptions) @@ -139,7 +133,6 @@ func main() { Log.Debugf("%v", err) os.Exit(1) } - chunkPath := filepath.Join(*argTempPath, "chunks") // set the global buffer configuration chunkSize, err := parseSizeArg(*argChunkSize) @@ -174,15 +167,12 @@ func main() { } chunkManager, err := chunk.NewManager( - chunkPath, chunkSize, *argChunkLoadAhead, *argChunkCheckThreads, *argChunkLoadThreads, client, - *argMaxChunks, - *argChunkLoadTimeout, - *argChunkLoadRetries) + *argMaxChunks) if nil != err { Log.Errorf("%v", err) os.Exit(4) From c9e505405fd51d42583eae8aef11bda06636b5c3 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 27 Jul 2017 20:03:05 +0200 Subject: [PATCH 40/46] fixed tests --- chunk/stack.go | 8 -------- chunk/stack_test.go | 4 ++-- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/chunk/stack.go b/chunk/stack.go index 5329a26..92024cd 100644 --- a/chunk/stack.go +++ b/chunk/stack.go @@ -23,14 +23,6 @@ func NewStack(maxChunks int) *Stack { } } -// // Len returns the length of the current stack -// func (s *Stack) Len() int { -// s.lock.RLock() -// len := s.len -// s.lock.RUnlock() -// return len -// } - // Pop pops the first item from the stack func (s *Stack) Pop() string { s.lock.Lock() diff --git a/chunk/stack_test.go b/chunk/stack_test.go index 3e3b525..ef9919a 100644 --- a/chunk/stack_test.go +++ b/chunk/stack_test.go @@ -3,14 +3,14 @@ package chunk import "testing" func TestOOB(t *testing.T) { - stack := NewStack() + stack := NewStack(1) stack.Push("1") stack.Touch("1") } func TestAddToStack(t *testing.T) { - stack := NewStack() + stack := 
NewStack(1) stack.Push("1") stack.Push("2") From 275d9c3f5cdb10c52744b1177c624036221b5826 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Thu, 27 Jul 2017 20:54:51 +0200 Subject: [PATCH 41/46] bugfix --- chunk/manager.go | 2 +- main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/chunk/manager.go b/chunk/manager.go index 73a94a9..2e9784f 100644 --- a/chunk/manager.go +++ b/chunk/manager.go @@ -57,7 +57,7 @@ func NewManager( return nil, fmt.Errorf("Chunk size must be divideable by 1024") } if maxChunks < 2 || maxChunks < loadAhead { - return nil, fmt.Errorf("max-chunk must be greater than 2 and bigger than the load ahead value") + return nil, fmt.Errorf("max-chunks must be greater than 2 and bigger than the load ahead value") } downloader, err := NewDownloader(loadThreads, client) diff --git a/main.go b/main.go index a2ce22f..afbec68 100644 --- a/main.go +++ b/main.go @@ -43,7 +43,7 @@ func main() { argChunkLoadThreads := flag.Int("chunk-load-threads", runtime.NumCPU()/2, "The number of threads to use for downloading chunks") argChunkCheckThreads := flag.Int("chunk-check-threads", runtime.NumCPU()/2, "The number of threads to use for checking chunk existence") argChunkLoadAhead := flag.Int("chunk-load-ahead", runtime.NumCPU()-1, "The number of chunks that should be read ahead") - argMaxChunks := flag.Int("max-chunks", 10, "The maximum number of chunks to be stored on disk") + argMaxChunks := flag.Int("max-chunks", runtime.NumCPU()*2, "The maximum number of chunks to be stored on disk") argRefreshInterval := flag.Duration("refresh-interval", 1*time.Minute, "The time to wait till checking for changes") argMountOptions := flag.StringP("fuse-options", "o", "", "Fuse mount options (e.g. -fuse-options allow_other,...)") argVersion := flag.Bool("version", false, "Displays program's version information") From eec4edbe87c0d0378a24ca935d9e7ad24218758d Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Fri, 28 Jul 2017 10:51:45 +0200 Subject: [PATCH 42/46] added external client-id/secret passing --- README.md | 4 ++++ config/config.go | 41 ++++++++++++++++++++++++----------------- drive/drive.go | 18 +++++++++++------- main.go | 10 +++++++--- 4 files changed, 46 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 5febb6c..df9c093 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,10 @@ Usage of ./plexdrive: The number of threads to use for downloading chunks (default 2) --chunk-size string The size of each chunk that is downloaded (units: B, K, M, G) (default "10M") + --client-id string + The client-id of your Google Drive API + --client-secret string + The client-secret of your Google Drive API -c, --config string The path to the configuration directory (default "~/.plexdrive") -o, --fuse-options string diff --git a/config/config.go b/config/config.go index d88adba..77bb326 100644 --- a/config/config.go +++ b/config/config.go @@ -26,24 +26,31 @@ func Read(configPath string) (*Config, error) { return &config, nil } -// CreateConfig creates the configuration by requesting from stdin -func Create(configPath string) (*Config, error) { - fmt.Println("1. Please go to https://console.developers.google.com/") - fmt.Println("2. Create a new project") - fmt.Println("3. Go to library and activate the Google Drive API") - fmt.Println("4. Go to credentials and create an OAuth client ID") - fmt.Println("5. Set the application type to 'other'") - fmt.Println("6. Specify some name and click create") - fmt.Printf("7. 
Enter your generated client ID: ") +// Create creates the configuration by requesting from stdin +func Create(configPath, clientID, clientSecret string) (*Config, error) { var config Config - if _, err := fmt.Scan(&config.ClientID); err != nil { - Log.Debugf("%v", err) - return nil, fmt.Errorf("Unable to read client id") - } - fmt.Printf("8. Enter your generated client secret: ") - if _, err := fmt.Scan(&config.ClientSecret); err != nil { - Log.Debugf("%v", err) - return nil, fmt.Errorf("Unable to read client secret") + + if "" == clientID || "" == clientSecret { + fmt.Println("1. Please go to https://console.developers.google.com/") + fmt.Println("2. Create a new project") + fmt.Println("3. Go to library and activate the Google Drive API") + fmt.Println("4. Go to credentials and create an OAuth client ID") + fmt.Println("5. Set the application type to 'other'") + fmt.Println("6. Specify some name and click create") + fmt.Printf("7. Enter your generated client ID: ") + + if _, err := fmt.Scan(&config.ClientID); err != nil { + Log.Debugf("%v", err) + return nil, fmt.Errorf("Unable to read client id") + } + fmt.Printf("8. Enter your generated client secret: ") + if _, err := fmt.Scan(&config.ClientSecret); err != nil { + Log.Debugf("%v", err) + return nil, fmt.Errorf("Unable to read client secret") + } + } else { + config.ClientID = clientID + config.ClientSecret = clientSecret } configJSON, err := json.Marshal(&config) diff --git a/drive/drive.go b/drive/drive.go index 5b22e40..651334d 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -32,7 +32,7 @@ type Client struct { } // NewClient creates a new Google Drive client -func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duration, rootNodeID string) (*Client, error) { +func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duration, rootNodeID string, suppressOutput bool) (*Client, error) { client := Client{ cache: cache, context: context.Background(), @@ -54,7 +54,7 @@ func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duratio client.rootNodeID = "root" } - if err := client.authorize(); nil != err { + if err := client.authorize(suppressOutput); nil != err { return nil, err } @@ -163,14 +163,14 @@ func (d *Client) checkChanges(firstCheck bool) { d.changesChecking = false } -func (d *Client) authorize() error { +func (d *Client) authorize(suppressOutput bool) error { Log.Debugf("Authorizing against Google Drive API") token, err := d.cache.LoadToken() if nil != err { Log.Debugf("Token could not be found, fetching new one") - t, err := getTokenFromWeb(d.config) + t, err := getTokenFromWeb(d.config, suppressOutput) if nil != err { return err } @@ -186,10 +186,14 @@ func (d *Client) authorize() error { // getTokenFromWeb uses Config to request a Token. // It returns the retrieved Token. 
-func getTokenFromWeb(config *oauth2.Config) (*oauth2.Token, error) { +func getTokenFromWeb(config *oauth2.Config, suppressOutput bool) (*oauth2.Token, error) { authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline) - fmt.Printf("Go to the following link in your browser %v\n", authURL) - fmt.Printf("Paste the authorization code: ") + if !suppressOutput { + fmt.Printf("Go to the following link in your browser %v\n", authURL) + fmt.Printf("Paste the authorization code: ") + } else { + fmt.Printf("%v\n", authURL) + } var code string if _, err := fmt.Scan(&code); err != nil { diff --git a/main.go b/main.go index afbec68..3207645 100644 --- a/main.go +++ b/main.go @@ -50,6 +50,8 @@ func main() { argUID := flag.Int64("uid", -1, "Set the mounts UID (-1 = default permissions)") argGID := flag.Int64("gid", -1, "Set the mounts GID (-1 = default permissions)") argUmask := flag.Uint32("umask", 0, "Override the default file permissions") + argClientID := flag.String("client-id", "", "The client-id of your Google Drive API") + argClientSecret := flag.String("client-secret", "", "The client-secret of your Google Drive API") // argDownloadSpeedLimit := flag.String("speed-limit", "", "This value limits the download speed, e.g. 5M = 5MB/s per chunk (units: B, K, M, G)") flag.Parse() @@ -118,7 +120,9 @@ func main() { Log.Debugf("fuse-options : %v", *argMountOptions) Log.Debugf("UID : %v", uid) Log.Debugf("GID : %v", gid) - Log.Debugf("Umask : %v", umask) + Log.Debugf("umask : %v", umask) + Log.Debugf("client-id : %v", *argClientID) + Log.Debugf("client-secret : %v", *argClientSecret) // Log.Debugf("speed-limit : %v", *argDownloadSpeedLimit) // version missing here @@ -145,7 +149,7 @@ func main() { configPath := filepath.Join(*argConfigPath, "config.json") cfg, err := config.Read(configPath) if nil != err { - cfg, err = config.Create(configPath) + cfg, err = config.Create(configPath, *argClientID, *argClientSecret) if nil != err { Log.Errorf("Could not read configuration") Log.Debugf("%v", err) @@ -160,7 +164,7 @@ func main() { } defer cache.Close() - client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID) + client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID, "" != *argClientID && "" != *argClientSecret) if nil != err { Log.Errorf("%v", err) os.Exit(4) From 37bdc43e272eabfd1151aacfe18b69eb6e9002cc Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Tue, 1 Aug 2017 19:09:11 +0200 Subject: [PATCH 43/46] instant delete objects and reappear if failed --- drive/drive.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/drive/drive.go b/drive/drive.go index 651334d..fbc8f7d 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -272,23 +272,27 @@ func (d *Client) Remove(object *APIObject, parent string) error { return fmt.Errorf("Could not get Google Drive client") } - if object.CanTrash { - if _, err := client.Files.Update(object.ObjectID, &gdrive.File{Trashed: true}).Do(); nil != err { - Log.Debugf("%v", err) - return fmt.Errorf("Could not delete object %v (%v) from API", object.ObjectID, object.Name) - } - } else { - if _, err := client.Files.Update(object.ObjectID, nil).RemoveParents(parent).Do(); nil != err { - Log.Debugf("%v", err) - return fmt.Errorf("Could not unsubscribe object %v (%v) from API", object.ObjectID, object.Name) - } - } - if err := d.cache.DeleteObject(object.ObjectID); nil != err { Log.Debugf("%v", err) return fmt.Errorf("Could not delete object %v (%v) from cache", 
object.ObjectID, object.Name) } + go func() { + if object.CanTrash { + if _, err := client.Files.Update(object.ObjectID, &gdrive.File{Trashed: true}).Do(); nil != err { + Log.Debugf("%v", err) + Log.Warningf("Could not delete object %v (%v) from API", object.ObjectID, object.Name) + d.cache.UpdateObject(object) + } + } else { + if _, err := client.Files.Update(object.ObjectID, nil).RemoveParents(parent).Do(); nil != err { + Log.Debugf("%v", err) + Log.Warningf("Could not unsubscribe object %v (%v) from API", object.ObjectID, object.Name) + d.cache.UpdateObject(object) + } + } + }() + return nil } From ba19fa36f9eca791f9ba19c97e80e992ee8b554e Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Tue, 1 Aug 2017 19:29:39 +0200 Subject: [PATCH 44/46] removed client stuff and added release scripts --- ci/compile-nightly.yml | 17 +++++++ ci/{compile.yml => compile-release.yml} | 4 +- ci/pipeline.yml | 46 ++++++++++++++++--- ci/scripts/{compile.sh => compile-nightly.sh} | 0 ci/scripts/compile-release.sh | 27 +++++++++++ ci/test-nightly.yml | 13 ++++++ ci/{test.yml => test-release.yml} | 2 +- config/config.go | 40 +++++++--------- drive/drive.go | 18 +++----- main.go | 8 +--- 10 files changed, 126 insertions(+), 49 deletions(-) create mode 100644 ci/compile-nightly.yml rename ci/{compile.yml => compile-release.yml} (86%) rename ci/scripts/{compile.sh => compile-nightly.sh} (100%) create mode 100755 ci/scripts/compile-release.sh create mode 100644 ci/test-nightly.yml rename ci/{test.yml => test-release.yml} (89%) diff --git a/ci/compile-nightly.yml b/ci/compile-nightly.yml new file mode 100644 index 0000000..a7caa7e --- /dev/null +++ b/ci/compile-nightly.yml @@ -0,0 +1,17 @@ +platform: linux + +image_resource: + type: docker-image + source: + repository: golang + +inputs: +- name: plexdrive-develop + path: go/src/github.com/dweidenfeld/plexdrive + +run: + path: go/src/github.com/dweidenfeld/plexdrive/ci/scripts/compile-release.sh + +outputs: +- name: release +- name: metadata \ No newline at end of file diff --git a/ci/compile.yml b/ci/compile-release.yml similarity index 86% rename from ci/compile.yml rename to ci/compile-release.yml index 42be99c..aa510a7 100644 --- a/ci/compile.yml +++ b/ci/compile-release.yml @@ -6,11 +6,11 @@ image_resource: repository: golang inputs: -- name: plexdrive +- name: plexdrive-master path: go/src/github.com/dweidenfeld/plexdrive run: - path: go/src/github.com/dweidenfeld/plexdrive/ci/scripts/compile.sh + path: go/src/github.com/dweidenfeld/plexdrive/ci/scripts/compile-release.sh outputs: - name: release diff --git a/ci/pipeline.yml b/ci/pipeline.yml index 49cf695..887ce05 100644 --- a/ci/pipeline.yml +++ b/ci/pipeline.yml @@ -6,12 +6,18 @@ resource_types: tag: latest resources: -- name: plexdrive +- name: plexdrive-develop type: git source: uri: https://github.com/dweidenfeld/plexdrive branch: develop +- name: plexdrive-master + type: git + source: + uri: https://github.com/dweidenfeld/plexdrive + branch: master + - name: github-nightly-release type: github-release source: @@ -21,7 +27,16 @@ resources: release: false pre_release: true -- name: slack-nightly-notification +- name: github-release + type: github-relase + source: + owner: dweidenfeld + repository: plexdrive + access_token: {{gh-access-token}} + release: true + pre_release: false + +- name: slack-notification type: slack-notification source: url: https://hooks.slack.com/services/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 @@ -29,12 +44,12 @@ resources: jobs: - name: build plan: - - get: plexdrive + - 
get: plexdrive-develop trigger: true - task: test - file: plexdrive/ci/test.yml + file: plexdrive/ci/test-nightly.yml - task: compile - file: plexdrive/ci/compile.yml + file: plexdrive/ci/compile-nightly.yml - put: github-nightly-release params: name: metadata/version @@ -42,6 +57,25 @@ jobs: body: metadata/version globs: - release/* - - put: slack-nightly-notification + - put: slack-notification + params: + text_file: metadata/notification + +- name: release + plan: + - get: plexdrive-master + trigger: true + - task: test + file: plexdrive/ci/test-release.yml + - task: compile + file: plexdrive/ci/compile-release.yml + - put: github-release + params: + name: metadata/version + tag: metadata/version + body: metadata/version + globs: + - release/* + - put: slack-notification params: text_file: metadata/notification \ No newline at end of file diff --git a/ci/scripts/compile.sh b/ci/scripts/compile-nightly.sh similarity index 100% rename from ci/scripts/compile.sh rename to ci/scripts/compile-nightly.sh diff --git a/ci/scripts/compile-release.sh b/ci/scripts/compile-release.sh new file mode 100755 index 0000000..382ab5a --- /dev/null +++ b/ci/scripts/compile-release.sh @@ -0,0 +1,27 @@ +#!/bin/bash +set -xe + +# Configuration +export ORIGIN=$PWD +export GOPATH=$PWD/go +export PATH=$GOPATH/bin:$PATH +cd $GOPATH/src/github.com/dweidenfeld/plexdrive + +# Version +export VERSION="$(cat ci/meta/version)" +echo "Got version $VERSION" + +sed -i.bak s/%VERSION%/$VERSION/g main.go +echo $VERSION > $ORIGIN/metadata/version +sed s/%VERSION%/$VERSION/g ci/meta/notification > $ORIGIN/metadata/notification + +# Build +go get -v +./ci/scripts/go-build-all + +mv plexdrive-* $ORIGIN/release + +# Check +cd $ORIGIN +ls -lah release +ls -lah metadata \ No newline at end of file diff --git a/ci/test-nightly.yml b/ci/test-nightly.yml new file mode 100644 index 0000000..e0590e4 --- /dev/null +++ b/ci/test-nightly.yml @@ -0,0 +1,13 @@ +platform: linux + +image_resource: + type: docker-image + source: + repository: golang + +inputs: +- name: plexdrive-develop + path: go/src/github.com/dweidenfeld/plexdrive + +run: + path: go/src/github.com/dweidenfeld/plexdrive/ci/scripts/test.sh \ No newline at end of file diff --git a/ci/test.yml b/ci/test-release.yml similarity index 89% rename from ci/test.yml rename to ci/test-release.yml index 730b43c..4232d40 100644 --- a/ci/test.yml +++ b/ci/test-release.yml @@ -6,7 +6,7 @@ image_resource: repository: golang inputs: -- name: plexdrive +- name: plexdrive-master path: go/src/github.com/dweidenfeld/plexdrive run: diff --git a/config/config.go b/config/config.go index 77bb326..d636572 100644 --- a/config/config.go +++ b/config/config.go @@ -27,30 +27,24 @@ func Read(configPath string) (*Config, error) { } // Create creates the configuration by requesting from stdin -func Create(configPath, clientID, clientSecret string) (*Config, error) { +func Create(configPath string) (*Config, error) { var config Config - - if "" == clientID || "" == clientSecret { - fmt.Println("1. Please go to https://console.developers.google.com/") - fmt.Println("2. Create a new project") - fmt.Println("3. Go to library and activate the Google Drive API") - fmt.Println("4. Go to credentials and create an OAuth client ID") - fmt.Println("5. Set the application type to 'other'") - fmt.Println("6. Specify some name and click create") - fmt.Printf("7. 
Enter your generated client ID: ") - - if _, err := fmt.Scan(&config.ClientID); err != nil { - Log.Debugf("%v", err) - return nil, fmt.Errorf("Unable to read client id") - } - fmt.Printf("8. Enter your generated client secret: ") - if _, err := fmt.Scan(&config.ClientSecret); err != nil { - Log.Debugf("%v", err) - return nil, fmt.Errorf("Unable to read client secret") - } - } else { - config.ClientID = clientID - config.ClientSecret = clientSecret + fmt.Println("1. Please go to https://console.developers.google.com/") + fmt.Println("2. Create a new project") + fmt.Println("3. Go to library and activate the Google Drive API") + fmt.Println("4. Go to credentials and create an OAuth client ID") + fmt.Println("5. Set the application type to 'other'") + fmt.Println("6. Specify some name and click create") + fmt.Printf("7. Enter your generated client ID: ") + + if _, err := fmt.Scan(&config.ClientID); err != nil { + Log.Debugf("%v", err) + return nil, fmt.Errorf("Unable to read client id") + } + fmt.Printf("8. Enter your generated client secret: ") + if _, err := fmt.Scan(&config.ClientSecret); err != nil { + Log.Debugf("%v", err) + return nil, fmt.Errorf("Unable to read client secret") } configJSON, err := json.Marshal(&config) diff --git a/drive/drive.go b/drive/drive.go index fbc8f7d..9323f17 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -32,7 +32,7 @@ type Client struct { } // NewClient creates a new Google Drive client -func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duration, rootNodeID string, suppressOutput bool) (*Client, error) { +func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duration, rootNodeID string) (*Client, error) { client := Client{ cache: cache, context: context.Background(), @@ -54,7 +54,7 @@ func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duratio client.rootNodeID = "root" } - if err := client.authorize(suppressOutput); nil != err { + if err := client.authorize(); nil != err { return nil, err } @@ -163,14 +163,14 @@ func (d *Client) checkChanges(firstCheck bool) { d.changesChecking = false } -func (d *Client) authorize(suppressOutput bool) error { +func (d *Client) authorize() error { Log.Debugf("Authorizing against Google Drive API") token, err := d.cache.LoadToken() if nil != err { Log.Debugf("Token could not be found, fetching new one") - t, err := getTokenFromWeb(d.config, suppressOutput) + t, err := getTokenFromWeb(d.config) if nil != err { return err } @@ -186,14 +186,10 @@ func (d *Client) authorize(suppressOutput bool) error { // getTokenFromWeb uses Config to request a Token. // It returns the retrieved Token. 
-func getTokenFromWeb(config *oauth2.Config, suppressOutput bool) (*oauth2.Token, error) { +func getTokenFromWeb(config *oauth2.Config) (*oauth2.Token, error) { authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline) - if !suppressOutput { - fmt.Printf("Go to the following link in your browser %v\n", authURL) - fmt.Printf("Paste the authorization code: ") - } else { - fmt.Printf("%v\n", authURL) - } + fmt.Printf("Go to the following link in your browser %v\n", authURL) + fmt.Printf("Paste the authorization code: ") var code string if _, err := fmt.Scan(&code); err != nil { diff --git a/main.go b/main.go index 3207645..b030cbf 100644 --- a/main.go +++ b/main.go @@ -50,8 +50,6 @@ func main() { argUID := flag.Int64("uid", -1, "Set the mounts UID (-1 = default permissions)") argGID := flag.Int64("gid", -1, "Set the mounts GID (-1 = default permissions)") argUmask := flag.Uint32("umask", 0, "Override the default file permissions") - argClientID := flag.String("client-id", "", "The client-id of your Google Drive API") - argClientSecret := flag.String("client-secret", "", "The client-secret of your Google Drive API") // argDownloadSpeedLimit := flag.String("speed-limit", "", "This value limits the download speed, e.g. 5M = 5MB/s per chunk (units: B, K, M, G)") flag.Parse() @@ -121,8 +119,6 @@ func main() { Log.Debugf("UID : %v", uid) Log.Debugf("GID : %v", gid) Log.Debugf("umask : %v", umask) - Log.Debugf("client-id : %v", *argClientID) - Log.Debugf("client-secret : %v", *argClientSecret) // Log.Debugf("speed-limit : %v", *argDownloadSpeedLimit) // version missing here @@ -149,7 +145,7 @@ func main() { configPath := filepath.Join(*argConfigPath, "config.json") cfg, err := config.Read(configPath) if nil != err { - cfg, err = config.Create(configPath, *argClientID, *argClientSecret) + cfg, err = config.Create(configPath) if nil != err { Log.Errorf("Could not read configuration") Log.Debugf("%v", err) @@ -164,7 +160,7 @@ func main() { } defer cache.Close() - client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID, "" != *argClientID && "" != *argClientSecret) + client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID) if nil != err { Log.Errorf("%v", err) os.Exit(4) From 1c08a293ed1eb1a58fa00bc3aeada2201f7e1f66 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Tue, 1 Aug 2017 19:35:29 +0200 Subject: [PATCH 45/46] preparation for multiple commands --- main.go | 230 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 118 insertions(+), 112 deletions(-) diff --git a/main.go b/main.go index b030cbf..aeff9e3 100644 --- a/main.go +++ b/main.go @@ -59,130 +59,136 @@ func main() { return } - // check if mountpoint is specified - argMountPoint := flag.Arg(0) - if "" == argMountPoint { - flag.Usage() - fmt.Println() - panic(fmt.Errorf("Mountpoint not specified")) - } + argCommand := flag.Arg(0) + + if argCommand == "mount" { + // check if mountpoint is specified + argMountPoint := flag.Arg(1) + if "" == argMountPoint { + flag.Usage() + fmt.Println() + panic(fmt.Errorf("Mountpoint not specified")) + } - // calculate uid / gid - uid := uint32(unix.Geteuid()) - gid := uint32(unix.Getegid()) - if *argUID > -1 { - uid = uint32(*argUID) - } - if *argGID > -1 { - gid = uint32(*argGID) - } + // calculate uid / gid + uid := uint32(unix.Geteuid()) + gid := uint32(unix.Getegid()) + if *argUID > -1 { + uid = uint32(*argUID) + } + if *argGID > -1 { + gid = uint32(*argGID) + } - // parse filemode - umask := os.FileMode(*argUmask) + // 
parse filemode + umask := os.FileMode(*argUmask) - // parse the mount options - var mountOptions []string - if "" != *argMountOptions { - mountOptions = strings.Split(*argMountOptions, ",") - } + // parse the mount options + var mountOptions []string + if "" != *argMountOptions { + mountOptions = strings.Split(*argMountOptions, ",") + } - // initialize the logger with the specific log level - var logLevel loggo.Level - switch *argLogLevel { - case 0: - logLevel = loggo.Error - case 1: - logLevel = loggo.Warning - case 2: - logLevel = loggo.Info - case 3: - logLevel = loggo.Debug - case 4: - logLevel = loggo.Trace - default: - logLevel = loggo.Warning - } - Log.SetLevel(logLevel) - - // debug all given parameters - Log.Debugf("verbosity : %v", logLevel) - Log.Debugf("root-node-id : %v", *argRootNodeID) - Log.Debugf("config : %v", *argConfigPath) - Log.Debugf("cache-file : %v", *argCacheFile) - Log.Debugf("chunk-size : %v", *argChunkSize) - Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) - Log.Debugf("chunk-check-threads : %v", *argChunkCheckThreads) - Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) - Log.Debugf("max-chunks : %v", *argMaxChunks) - Log.Debugf("refresh-interval : %v", *argRefreshInterval) - Log.Debugf("fuse-options : %v", *argMountOptions) - Log.Debugf("UID : %v", uid) - Log.Debugf("GID : %v", gid) - Log.Debugf("umask : %v", umask) - // Log.Debugf("speed-limit : %v", *argDownloadSpeedLimit) - // version missing here - - // create all directories - if err := os.MkdirAll(*argConfigPath, 0766); nil != err { - Log.Errorf("Could not create configuration directory") - Log.Debugf("%v", err) - os.Exit(1) - } - if err := os.MkdirAll(filepath.Dir(*argCacheFile), 0766); nil != err { - Log.Errorf("Could not create cache file directory") - Log.Debugf("%v", err) - os.Exit(1) - } + // initialize the logger with the specific log level + var logLevel loggo.Level + switch *argLogLevel { + case 0: + logLevel = loggo.Error + case 1: + logLevel = loggo.Warning + case 2: + logLevel = loggo.Info + case 3: + logLevel = loggo.Debug + case 4: + logLevel = loggo.Trace + default: + logLevel = loggo.Warning + } + Log.SetLevel(logLevel) + + // debug all given parameters + Log.Debugf("verbosity : %v", logLevel) + Log.Debugf("root-node-id : %v", *argRootNodeID) + Log.Debugf("config : %v", *argConfigPath) + Log.Debugf("cache-file : %v", *argCacheFile) + Log.Debugf("chunk-size : %v", *argChunkSize) + Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) + Log.Debugf("chunk-check-threads : %v", *argChunkCheckThreads) + Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) + Log.Debugf("max-chunks : %v", *argMaxChunks) + Log.Debugf("refresh-interval : %v", *argRefreshInterval) + Log.Debugf("fuse-options : %v", *argMountOptions) + Log.Debugf("UID : %v", uid) + Log.Debugf("GID : %v", gid) + Log.Debugf("umask : %v", umask) + // Log.Debugf("speed-limit : %v", *argDownloadSpeedLimit) + // version missing here + + // create all directories + if err := os.MkdirAll(*argConfigPath, 0766); nil != err { + Log.Errorf("Could not create configuration directory") + Log.Debugf("%v", err) + os.Exit(1) + } + if err := os.MkdirAll(filepath.Dir(*argCacheFile), 0766); nil != err { + Log.Errorf("Could not create cache file directory") + Log.Debugf("%v", err) + os.Exit(1) + } - // set the global buffer configuration - chunkSize, err := parseSizeArg(*argChunkSize) - if nil != err { - Log.Errorf("%v", err) - os.Exit(2) - } + // set the global buffer configuration + chunkSize, err := 
parseSizeArg(*argChunkSize) + if nil != err { + Log.Errorf("%v", err) + os.Exit(2) + } - // read the configuration - configPath := filepath.Join(*argConfigPath, "config.json") - cfg, err := config.Read(configPath) - if nil != err { - cfg, err = config.Create(configPath) + // read the configuration + configPath := filepath.Join(*argConfigPath, "config.json") + cfg, err := config.Read(configPath) if nil != err { - Log.Errorf("Could not read configuration") - Log.Debugf("%v", err) - os.Exit(3) + cfg, err = config.Create(configPath) + if nil != err { + Log.Errorf("Could not read configuration") + Log.Debugf("%v", err) + os.Exit(3) + } } - } - cache, err := drive.NewCache(*argCacheFile, *argConfigPath, *argLogLevel > 3) - if nil != err { - Log.Errorf("%v", err) - os.Exit(4) - } - defer cache.Close() + cache, err := drive.NewCache(*argCacheFile, *argConfigPath, *argLogLevel > 3) + if nil != err { + Log.Errorf("%v", err) + os.Exit(4) + } + defer cache.Close() - client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID) - if nil != err { - Log.Errorf("%v", err) - os.Exit(4) - } + client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID) + if nil != err { + Log.Errorf("%v", err) + os.Exit(4) + } - chunkManager, err := chunk.NewManager( - chunkSize, - *argChunkLoadAhead, - *argChunkCheckThreads, - *argChunkLoadThreads, - client, - *argMaxChunks) - if nil != err { - Log.Errorf("%v", err) - os.Exit(4) - } + chunkManager, err := chunk.NewManager( + chunkSize, + *argChunkLoadAhead, + *argChunkCheckThreads, + *argChunkLoadThreads, + client, + *argMaxChunks) + if nil != err { + Log.Errorf("%v", err) + os.Exit(4) + } - // check os signals like SIGINT/TERM - checkOsSignals(argMountPoint) - if err := mount.Mount(client, chunkManager, argMountPoint, mountOptions, uid, gid, umask); nil != err { - Log.Debugf("%v", err) - os.Exit(5) + // check os signals like SIGINT/TERM + checkOsSignals(argMountPoint) + if err := mount.Mount(client, chunkManager, argMountPoint, mountOptions, uid, gid, umask); nil != err { + Log.Debugf("%v", err) + os.Exit(5) + } + } else { + Log.Errorf("Command %v not found", argCommand) } } From b32feac9c57b5e11ab5b7246ba14404bb980e837 Mon Sep 17 00:00:00 2001 From: Dominik Weidenfeld Date: Tue, 1 Aug 2017 19:37:48 +0200 Subject: [PATCH 46/46] updated readme --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index df9c093..5663c94 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ _If you like the project, feel free to make a small [donation via PayPal](https: 3. Create your own client id and client secret (see [https://rclone.org/drive/#making-your-own-client-id](https://rclone.org/drive/#making-your-own-client-id)). 4. Sample command line for plexdrive ``` -./plexdrive -t /mnt/plexdrive-cache/ -c /root/.plexdrive -o allow_other /mnt/plexdrive +./plexdrive mount -c /root/.plexdrive -o allow_other /mnt/plexdrive ``` ### Crypted mount with rclone @@ -28,7 +28,7 @@ You can use [this tutorial](TUTORIAL.md) for instruction how to mount an encrypt ## Usage ``` -Usage of ./plexdrive: +Usage of ./plexdrive mount: --cache-file string Path the the cache file (default "~/.plexdrive/cache.bolt") --chunk-check-threads int