Commit
Merge 81afa11 into dee3a38
timshannon committed Jan 30, 2019
2 parents dee3a38 + 81afa11 commit 03ae794
Showing 8 changed files with 77 additions and 51 deletions.
9 changes: 5 additions & 4 deletions README.md
@@ -8,12 +8,13 @@ Badger DB for customizing as you wish. By default the encoding used is Gob, so
interface for faster serialization. Or, alternately, you can use any serialization you want by supplying encode / decode
funcs to the `Options` struct on Open.

One Go Type will have one bucket, and multiple index buckets in a BadgerDB file, so you can store multiple Go Types in the
same database.
One Go Type will be prefixed with its type name, so you can store multiple types in a single Badger database without
conflicts.

This project is a rewrite of the [BoltHold](https://github.com/timshannon/bolthold) project on the Badger KV database
instead of [Bolt](https://github.com/etcd-io/bbolt). For a comparison between bolt and badger, see
https://blog.dgraph.io/post/badger-lmdb-boltdb/.
instead of [Bolt](https://github.com/etcd-io/bbolt). For a performance comparison between bolt and badger, see
https://blog.dgraph.io/post/badger-lmdb-boltdb/. I've written up my own comparison of the two focusing on
characteristics *other* than performance here: https://tech.townsourced.com/post/bolddb-vs-badger/.

## Indexes
Indexes allow you to skip checking any records that don't meet your index criteria. If you have 1000 records and only
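To make the README wording above concrete, here is a minimal sketch of opening a store with custom serialization via the `Options` struct passed to `Open`. It is not part of this commit: the JSON encode/decode funcs and directory paths are made up, and it assumes `Options` exposes `Encoder`/`Decoder` fields alongside the embedded badger options, as the `store.go` hunk further down suggests.

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/timshannon/badgerhold"
)

func main() {
	// Start from the library defaults (Gob encoding, badger's own defaults)
	// and swap in JSON serialization through the Options struct.
	opts := badgerhold.DefaultOptions
	opts.Dir = "/tmp/badgerhold-example" // hypothetical paths
	opts.ValueDir = "/tmp/badgerhold-example"
	opts.Encoder = func(value interface{}) ([]byte, error) {
		return json.Marshal(value)
	}
	opts.Decoder = func(data []byte, value interface{}) error {
		return json.Unmarshal(data, value)
	}

	store, err := badgerhold.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer store.Close()
}
```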
4 changes: 0 additions & 4 deletions delete_test.go
@@ -49,10 +49,6 @@ func TestDeleteMatching(t *testing.T) {

err := store.DeleteMatching(&ItemTest{}, tst.query)
if err != nil {
if tst.writeError {
// error is expected on this test
return
}
t.Fatalf("Error deleting data from badgerhold: %s", err)
}

13 changes: 5 additions & 8 deletions find_test.go
@@ -181,10 +181,9 @@ var testData = []ItemTest{
}

type test struct {
name string
query *badgerhold.Query
result []int // indices of test data to be found
writeError bool // if the query will error on writable transactions
name string
query *badgerhold.Query
result []int // indices of test data to be found
}

var testResults = []test{
@@ -347,8 +346,7 @@ var testResults = []test{

return false, nil
}),
result: []int{14, 15},
writeError: true,
result: []int{14, 15},
},
test{
name: "Time Comparison",
@@ -471,8 +469,7 @@ var testResults = []test{
grp[0].Max("ID", max)
return ra.Field().(int) == max.ID, nil
}),
result: []int{11, 14, 15},
writeError: true,
result: []int{11, 14, 15},
},
test{
name: "Indexed in",
42 changes: 36 additions & 6 deletions index.go
@@ -167,15 +167,30 @@ type iterator struct {
keyCache [][]byte
nextKeys func(*badger.Iterator) ([][]byte, error)
iter *badger.Iterator
bookmark *iterBookmark
lastSeek []byte
tx *badger.Txn
err error
}

func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {
// iterBookmark stores a seek location in a specific iterator
// so that a single RW iterator can be shared within a single transaction
type iterBookmark struct {
iter *badger.Iterator
seekKey []byte
}

func newIterator(tx *badger.Txn, typeName string, query *Query, bookmark *iterBookmark) *iterator {
i := &iterator{
tx: tx,
iter: tx.NewIterator(badger.DefaultIteratorOptions),
tx: tx,
}

if bookmark != nil {
i.iter = bookmark.iter
} else {
i.iter = tx.NewIterator(badger.DefaultIteratorOptions)
}

var prefix []byte

if query.index != "" {
@@ -203,7 +218,7 @@ func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {

item := iter.Item()
key := item.KeyCopy(nil)
ok := false
var ok bool
if len(criteria) == 0 {
// nothing to check return key for value testing
ok = true
@@ -228,6 +243,7 @@ func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {
nKeys = append(nKeys, key)

}
i.lastSeek = key
iter.Next()
}
return nKeys, nil
@@ -248,10 +264,10 @@ func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {
}

item := iter.Item()

key := item.KeyCopy(nil)
// no currentRow on indexes as it refers to multiple rows
// remove index prefix for matching
ok, err := matchesAllCriteria(criteria, item.Key()[len(prefix):], true, "", nil)
ok, err := matchesAllCriteria(criteria, key[len(prefix):], true, "", nil)
if err != nil {
return nil, err
}
@@ -269,6 +285,8 @@ func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {
return nil
})
}

i.lastSeek = key
iter.Next()

}
@@ -279,6 +297,13 @@ func newIterator(tx *badger.Txn, typeName string, query *Query) *iterator {
return i
}

func (i *iterator) createBookmark() *iterBookmark {
return &iterBookmark{
iter: i.iter,
seekKey: i.lastSeek,
}
}

// Next returns the next key value that matches the iterator's criteria
// If no more kv's are available they return nil; if there is an error, they return nil
// and iterator.Error() will return the error
@@ -328,5 +353,10 @@ func (i *iterator) Error() error {
}

func (i *iterator) Close() {
if i.bookmark != nil {
i.iter.Seek(i.bookmark.seekKey)
return
}

i.iter.Close()
}
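The `iterBookmark` type added above exists because a writable badger transaction only tolerates a single live iterator, so a nested query has to borrow the parent's iterator and put it back where it found it. The snippet below is my own illustration of that seek-back idea using badger directly, not code from this change; the paths are made up, and the single-iterator constraint is stated as I understand badger v1, not as something this diff asserts.

```go
package main

import (
	"log"

	"github.com/dgraph-io/badger"
)

func main() {
	opts := badger.DefaultOptions
	opts.Dir = "/tmp/badger-bookmark-example" // made-up paths
	opts.ValueDir = "/tmp/badger-bookmark-example"

	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()

		for it.Rewind(); it.Valid(); it.Next() {
			// "Bookmark" the outer position before the nested scan.
			bookmark := it.Item().KeyCopy(nil)

			// The nested scan reuses the same iterator rather than opening
			// a second one inside the writable transaction.
			for it.Rewind(); it.Valid(); it.Next() {
				_ = it.Item() // inspect nested records here
			}

			// Seek back so the outer loop resumes where it left off,
			// which is the same job iterator.Close does when a bookmark is set.
			it.Seek(bookmark)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```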
2 changes: 1 addition & 1 deletion put.go
@@ -12,7 +12,7 @@ import (
)

// ErrKeyExists is the error returned when data is being Inserted for a Key that already exists
var ErrKeyExists = errors.New("This Key already exists in this badgerhold for this type")
var ErrKeyExists = errors.New("This Key already exists in badgerhold for this type")

// sequence tells badgerhold to insert the key as the next sequence in the bucket
type sequence struct{}
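For context on the reworded error message above, a caller typically compares against `ErrKeyExists` directly. This is a hedged sketch of mine, assuming `Insert` and `Update` keep bolthold-style signatures; the `Item` type and key are invented for illustration.

```go
package example

import "github.com/timshannon/badgerhold"

type Item struct {
	Name string
}

// insertOrUpdate inserts the item and falls back to an update when the key
// is already taken for this type (the ErrKeyExists case described above).
func insertOrUpdate(store *badgerhold.Store, key string, item *Item) error {
	err := store.Insert(key, item)
	if err == badgerhold.ErrKeyExists {
		return store.Update(key, item)
	}
	return err
}
```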
4 changes: 0 additions & 4 deletions put_test.go
@@ -236,10 +236,6 @@ func TestUpdateMatching(t *testing.T) {
})

if err != nil {
if tst.writeError {
// error is expected
return
}
t.Fatalf("Error updating data from badgerhold: %s", err)
}

50 changes: 27 additions & 23 deletions query.go
@@ -46,8 +46,9 @@ type Query struct {
badIndex bool
dataType reflect.Type
tx *badger.Txn
iterator *badger.Iterator
writable bool // if the query is part of a writable transaction
writable bool
subquery bool
bookmark *iterBookmark

limit int
skip int
@@ -166,7 +167,7 @@ func (q *Query) SortBy(fields ...string) *Query {
if fields[i] == Key {
panic("Cannot sort by Key.")
}
found := false
var found bool
for k := range q.sort {
if q.sort[k] == fields[i] {
found = true
@@ -333,10 +334,9 @@ type MatchFunc func(ra *RecordAccess) (bool, error)
// RecordAccess allows access to the current record, field or allows running a subquery within a
// MatchFunc
type RecordAccess struct {
tx *badger.Txn
record interface{}
field interface{}
writeable bool
record interface{}
field interface{}
query *Query
}

// Field is the current field being queried
@@ -352,20 +352,17 @@ func (r *RecordAccess) Record() interface{} {
// SubQuery allows you to run another query in the same transaction for each
// record in a parent query
func (r *RecordAccess) SubQuery(result interface{}, query *Query) error {
if r.writeable {
return fmt.Errorf("Subqueries are currently not supported from within writable transactions")
}
return findQuery(r.tx, result, query)
query.subquery = true
query.bookmark = r.query.bookmark
return findQuery(r.query.tx, result, query)
}

// SubAggregateQuery allows you to run another aggregate query in the same transaction for each
// record in a parent query
func (r *RecordAccess) SubAggregateQuery(query *Query, groupBy ...string) ([]*AggregateResult, error) {
if r.writeable {
return nil, fmt.Errorf("Subqueries are currently not supported from within writable transactions")
}

return aggregateQuery(r.tx, r.record, query, groupBy...)
query.subquery = true
query.bookmark = r.query.bookmark
return aggregateQuery(r.query.tx, r.record, query, groupBy...)
}

// MatchFunc will test if a field matches the passed in function
@@ -428,10 +425,9 @@ func (c *Criterion) test(testValue interface{}, encoded bool, keyType string, cu
return c.value.(*regexp.Regexp).Match([]byte(fmt.Sprintf("%s", value))), nil
case fn:
return c.value.(MatchFunc)(&RecordAccess{
field: value,
record: currentRow,
tx: c.query.tx,
writeable: c.query.writable,
field: value,
record: currentRow,
query: c.query,
})
case isnil:
return reflect.ValueOf(value).IsNil(), nil
@@ -463,6 +459,7 @@ func (c *Criterion) test(testValue interface{}, encoded bool, keyType string, cu

func matchesAllCriteria(criteria []*Criterion, value interface{}, encoded bool, keyType string,
currentRow interface{}) (bool, error) {

for i := range criteria {
ok, err := criteria[i].test(value, encoded, keyType, currentRow)
if err != nil {
@@ -563,8 +560,15 @@ func runQuery(tx *badger.Txn, dataType interface{}, query *Query, retrievedKeys
return runQuerySort(tx, dataType, query, action)
}

iter := newIterator(tx, storer.Type(), query)
defer iter.Close()
iter := newIterator(tx, storer.Type(), query, query.bookmark)
if (query.writable || query.subquery) && query.bookmark == nil {
query.bookmark = iter.createBookmark()
}

defer func() {
iter.Close()
query.bookmark = nil
}()

if query.index != "" && query.badIndex {
return fmt.Errorf("The index %s does not exist", query.index)
@@ -763,6 +767,7 @@ func findQuery(tx *badger.Txn, result interface{}, query *Query) error {
}

query.writable = false

resultVal := reflect.ValueOf(result)
if resultVal.Kind() != reflect.Ptr || resultVal.Elem().Kind() != reflect.Slice {
panic("result argument must be a slice address")
@@ -865,7 +870,6 @@ func updateQuery(tx *badger.Txn, dataType interface{}, query *Query, update func
}

query.writable = true

var records []*record

err := runQuery(tx, dataType, query, nil, query.skip,
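The `RecordAccess` and `runQuery` changes above are what let a `MatchFunc` run subqueries while the outer query is writable, instead of returning the old "not supported" error. Below is my own hedged sketch of what that enables. The `Order` type, the field names, and the assumption that `Record()` returns a pointer are mine, and the `Where`/`MatchFunc`/`UpdateMatching` calls presume badgerhold keeps bolthold's query API.

```go
package example

import "github.com/timshannon/badgerhold"

type Order struct {
	ID       int
	Customer string
	Total    int
}

// discountLargestOrders halves each customer's single largest order. The
// MatchFunc issues a SubQuery against the same transaction, which this
// change allows even though UpdateMatching runs in a writable transaction.
func discountLargestOrders(store *badgerhold.Store) error {
	query := badgerhold.Where("Total").MatchFunc(func(ra *badgerhold.RecordAccess) (bool, error) {
		current := ra.Record().(*Order) // assumes Record() yields a pointer

		// Keep this record only if no other order for the same customer
		// has a larger total.
		var larger []Order
		err := ra.SubQuery(&larger,
			badgerhold.Where("Customer").Eq(current.Customer).And("Total").Gt(current.Total))
		if err != nil {
			return false, err
		}
		return len(larger) == 0, nil
	})

	return store.UpdateMatching(&Order{}, query, func(record interface{}) error {
		order := record.(*Order)
		order.Total /= 2
		return nil
	})
}
```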
4 changes: 3 additions & 1 deletion store.go
@@ -28,6 +28,8 @@ type Options struct {
badger.Options
}

// DefaultOptions is a default set of options for opening a BadgerHold database
// Includes badger's own default options
var DefaultOptions = Options{
Options: badger.DefaultOptions,
Encoder: DefaultEncode,
@@ -76,7 +78,7 @@ func (s *Store) Close() error {

/*
NOTE: Not going to implement ReIndex and Remove index
I had originally created these to make the transision from a plain bolt or badger DB easier
I had originally created these to make the transition from a plain bolt or badger DB easier
but there is too much chance for lost data, and it's probably better that any conversion be
done by the developer so they can directly manage how they want data to be migrated.
If you disagree, feel free to open an issue and we can revisit this.
