Skip to content

Commit

Permalink
extracted file and catalog building
Browse files Browse the repository at this point in the history
  • Loading branch information
256dpi committed Dec 20, 2019
1 parent 2e5c1eb commit e081606
Show file tree
Hide file tree
Showing 2 changed files with 126 additions and 96 deletions.
117 changes: 117 additions & 0 deletions file.go
@@ -0,0 +1,117 @@
package lungo

import (
"fmt"
"strings"
"time"

"github.com/256dpi/lungo/bsonkit"
"github.com/256dpi/lungo/mongokit"
)

// File is a format for storing catalogs in a single structure. It is the
// serialized (BSON-tagged) representation of a Catalog: each entry in
// Namespaces maps a namespace name (a "database.collection" string, see
// BuildFile/BuildCatalog) to its documents and index configurations.
type File struct {
Namespaces map[string]FileNamespace `bson:"namespaces"`
}

// FileNamespace is a single namespace stored in a file. It carries the raw
// document list and the configuration of every index, keyed by index name.
type FileNamespace struct {
Documents bsonkit.List `bson:"documents"` // all documents of the namespace
Indexes map[string]FileIndex `bson:"indexes"` // index configurations by name
}

// FileIndex is a single index stored in a file. Its fields mirror
// mongokit.IndexConfig so the index can be recreated on load (see
// BuildCatalog).
type FileIndex struct {
Key bsonkit.Doc `bson:"key"` // the indexed key specification
Unique bool `bson:"unique"` // whether duplicate keys are rejected
Partial bsonkit.Doc `bson:"partial"` // optional partial filter expression
Expiry time.Duration `bson:"expiry"` // optional TTL; zero means no expiry
}

// BuildFile will build a new file from the provided catalog. The returned
// file contains a snapshot of every namespace's documents together with the
// configuration of all of its indexes.
func BuildFile(catalog *Catalog) *File {
	// allocate the file with room for every namespace
	file := &File{
		Namespaces: make(map[string]FileNamespace, len(catalog.Namespaces)),
	}

	// convert each catalog namespace into its file representation
	for handle, namespace := range catalog.Namespaces {
		// snapshot the configuration of every index
		indexes := make(map[string]FileIndex, len(namespace.Indexes))
		for name, index := range namespace.Indexes {
			config := index.Config()
			indexes[name] = FileIndex{
				Key:     config.Key,
				Unique:  config.Unique,
				Partial: config.Partial,
				Expiry:  config.Expiry,
			}
		}

		// record documents and indexes under the namespace name
		file.Namespaces[handle.String()] = FileNamespace{
			Documents: namespace.Documents.List,
			Indexes:   indexes,
		}
	}

	return file
}

// BuildCatalog will build a new catalog from the file. It recreates every
// namespace collection, restores its documents and rebuilds all indexes from
// their stored configurations. An error is returned if a namespace name is
// malformed, an index cannot be created, or an index build detects duplicate
// documents.
//
// Namespace names have the form "database.collection". Only the first dot is
// treated as the separator, since MongoDB collection names may themselves
// contain dots while database names may not.
func (f *File) BuildCatalog() (*Catalog, error) {
	// create catalog
	catalog := NewCatalog()

	// process namespaces
	for name, ns := range f.Namespaces {
		// split on the first dot only so dotted collection names survive
		segments := strings.SplitN(name, ".", 2)
		if len(segments) != 2 || segments[0] == "" || segments[1] == "" {
			return nil, fmt.Errorf("invalid namespace name %q", name)
		}

		// prepare handle
		handle := Handle{segments[0], segments[1]}

		// create namespace collection
		namespace := mongokit.NewCollection(false)

		// restore documents
		namespace.Documents = bsonkit.NewSet(ns.Documents)

		// recreate and build indexes
		for indexName, idx := range ns.Indexes {
			// create index from the stored configuration
			index, err := mongokit.CreateIndex(mongokit.IndexConfig{
				Key:     idx.Key,
				Unique:  idx.Unique,
				Partial: idx.Partial,
				Expiry:  idx.Expiry,
			})
			if err != nil {
				return nil, err
			}

			// build index over all documents; a false result indicates a
			// uniqueness violation among the stored documents
			ok, err := index.Build(ns.Documents)
			if err != nil {
				return nil, err
			} else if !ok {
				return nil, fmt.Errorf("duplicate document for index %q", indexName)
			}

			// add index
			namespace.Indexes[indexName] = index
		}

		// add namespace
		catalog.Namespaces[handle] = namespace
	}

	return catalog, nil
}
105 changes: 9 additions & 96 deletions store.go
Expand Up @@ -2,17 +2,12 @@ package lungo

import (
"bytes"
"fmt"
"io/ioutil"
"os"
"strings"
"time"

"go.mongodb.org/mongo-driver/bson"

"github.com/256dpi/lungo/bsonkit"
"github.com/256dpi/lungo/dbkit"
"github.com/256dpi/lungo/mongokit"
)

// Store is the interface that describes storage adapters.
Expand Down Expand Up @@ -44,25 +39,6 @@ func (m MemoryStore) Store(data *Catalog) error {
return nil
}

// File is the format of the file stored by the file store.
type File struct {
Namespaces map[string]FileNamespace `bson:"namespaces"`
}

// FileNamespace is a single namespace stored in a file by the file store.
type FileNamespace struct {
Documents bsonkit.List `bson:"documents"`
Indexes map[string]FileIndex `bson:"indexes"`
}

// FileIndex is a single index stored in a file by the file store.
type FileIndex struct {
Key bsonkit.Doc `bson:"key"`
Unique bool `bson:"unique"`
Partial bsonkit.Doc `bson:"partial"`
Expiry time.Duration `bson:"expiry"`
}

// FileStore writes the catalog to a single file on disk.
type FileStore struct {
path string
Expand All @@ -77,7 +53,8 @@ func NewFileStore(path string, mode os.FileMode) *FileStore {
}
}

// Load will read the catalog from disk and return it.
// Load will read the catalog from disk and return it. If no file exists at the
// specified location an empty catalog is returned.
func (s *FileStore) Load() (*Catalog, error) {
// load file
buf, err := ioutil.ReadFile(s.path)
Expand All @@ -94,83 +71,19 @@ func (s *FileStore) Load() (*Catalog, error) {
return nil, err
}

// create catalog
catalog := NewCatalog()

// process namespaces
for name, ns := range file.Namespaces {
// create handle
segments := strings.Split(name, ".")
handle := Handle{segments[0], segments[1]}

// create namespace
namespace := mongokit.NewCollection(false)

// add documents
namespace.Documents = bsonkit.NewSet(ns.Documents)

// add indexes
for name, idx := range ns.Indexes {
// create index
index, err := mongokit.CreateIndex(mongokit.IndexConfig{
Key: idx.Key,
Unique: idx.Unique,
Partial: idx.Partial,
Expiry: idx.Expiry,
})
if err != nil {
return nil, err
}

// build index
ok, err := index.Build(ns.Documents)
if err != nil {
return nil, err
} else if !ok {
return nil, fmt.Errorf("duplicate document for index %q", name)
}

// add index
namespace.Indexes[name] = index
}

// add namespace
catalog.Namespaces[handle] = namespace
// build catalog from file
catalog, err := file.BuildCatalog()
if err != nil {
return nil, err
}

return catalog, nil
}

// Store will atomically write the catalog to disk.
func (s *FileStore) Store(data *Catalog) error {
// create file
file := File{
Namespaces: map[string]FileNamespace{},
}

// add namespaces
for handle, namespace := range data.Namespaces {
// collect indexes
indexes := map[string]FileIndex{}
for name, index := range namespace.Indexes {
// get config
config := index.Config()

// add index
indexes[name] = FileIndex{
Key: config.Key,
Unique: config.Unique,
Partial: config.Partial,
Expiry: config.Expiry,
}
}

// add namespace
file.Namespaces[handle.String()] = FileNamespace{
Documents: namespace.Documents.List,
Indexes: indexes,
}
}
func (s *FileStore) Store(catalog *Catalog) error {
// build file from catalog
file := BuildFile(catalog)

// encode file
buf, err := bson.Marshal(file)
Expand Down

0 comments on commit e081606

Please sign in to comment.