Skip to content

Commit

Permalink
Doc: Add godoc to history and hyper packages. WIP.
Browse files Browse the repository at this point in the history
Refers to #40
  • Loading branch information
Jose Luis Lucas committed Jun 18, 2019
1 parent 875425c commit 28bc133
Show file tree
Hide file tree
Showing 6 changed files with 93 additions and 69 deletions.
4 changes: 0 additions & 4 deletions balloon/history/print_visitor.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,10 +28,6 @@ type printVisitor struct {
height uint16
}

// newPrintVisitor creates a printVisitor for a tree of the given height.
// NOTE(review): tokens is created with length 1, so the slice starts with one
// empty string and Result's joined output begins with a blank entry — confirm
// this leading separator is intentional.
func newPrintVisitor(height uint16) *printVisitor {
return &printVisitor{tokens: make([]string, 1), height: height}
}

// Result joins every collected token with a newline and prefixes the whole
// string with a leading newline.
func (v *printVisitor) Result() string {
	return "\n" + strings.Join(v.tokens, "\n")
}
Expand Down
14 changes: 14 additions & 0 deletions balloon/history/tree.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,9 @@
limitations under the License.
*/

// Package history implements the history tree (a Merkle tree, append-only structure)
// life cycle, its operations, the different visitors used to navigate the tree, as well
// as the functionality to request and verify membership and incremental proofs.
package history

import (
Expand Down Expand Up @@ -45,6 +48,9 @@ func NewHistoryTree(hasherF func() hashing.Hasher, store storage.Store, cacheSiz
}
}

// Add adds an event digest into the history tree.
// It builds an insert visitor, calculates the expected root hash, and returns it along
// with the storage mutations to be done at balloon level.
func (t *HistoryTree) Add(eventDigest hashing.Digest, version uint64) (hashing.Digest, []*storage.Mutation, error) {

// log.Debugf("Adding new event digest %x with version %d", eventDigest, version)
Expand All @@ -56,6 +62,9 @@ func (t *HistoryTree) Add(eventDigest hashing.Digest, version uint64) (hashing.D
return rh, visitor.Result(), nil
}

// AddBulk adds a bulk of event digests (one after another) into the history tree.
// It builds an insert visitor, calculates the expected bulk of root hashes, and returns
// them along with the storage mutations to be done at balloon level.
func (t *HistoryTree) AddBulk(eventDigests []hashing.Digest, initialVersion uint64) ([]hashing.Digest, []*storage.Mutation, error) {

visitor := newInsertVisitor(t.hasher, t.writeCache, storage.HistoryTable)
Expand All @@ -69,6 +78,8 @@ func (t *HistoryTree) AddBulk(eventDigests []hashing.Digest, initialVersion uint

}

// ProveMembership builds the membership proof of the given index against the given
// version. It uses an audit-path visitor to build the proof.
func (t *HistoryTree) ProveMembership(index, version uint64) (*MembershipProof, error) {

//log.Debugf("Proving membership for index %d with version %d", index, version)
Expand All @@ -85,6 +96,8 @@ func (t *HistoryTree) ProveMembership(index, version uint64) (*MembershipProof,
return proof, nil
}

// ProveConsistency builds the incremental proof between the given event versions.
// It uses an audit-path visitor to build the proof.
func (t *HistoryTree) ProveConsistency(start, end uint64) (*IncrementalProof, error) {

//log.Debugf("Proving consistency between versions %d and %d", start, end)
Expand All @@ -98,6 +111,7 @@ func (t *HistoryTree) ProveConsistency(start, end uint64) (*IncrementalProof, er
return proof, nil
}

// Close resets the history tree's write and read caches, and its hasher.
func (t *HistoryTree) Close() {
t.hasher = nil
t.writeCache = nil
Expand Down
6 changes: 3 additions & 3 deletions balloon/history/tree_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ func TestProveMembership(t *testing.T) {

for _, c := range testCases {
_, mutations, err := tree.Add(c.eventDigest, c.index)
store.Mutate(mutations)
_ = store.Mutate(mutations)
require.NoError(t, err)
}

Expand Down Expand Up @@ -425,7 +425,7 @@ func TestProveConsistency(t *testing.T) {
index := uint64(i)
_, mutations, err := tree.Add(c.eventDigest, index)
require.NoError(t, err)
store.Mutate(mutations)
_ = store.Mutate(mutations)

start := uint64(max(0, i-1))
end := index
Expand Down Expand Up @@ -495,7 +495,7 @@ func TestProveConsistencySameVersions(t *testing.T) {
for i, c := range testCases {
_, mutations, err := tree.Add(c.eventDigest, c.index)
require.NoError(t, err)
store.Mutate(mutations)
_ = store.Mutate(mutations)

proof, err := tree.ProveConsistency(c.index, c.index)
require.NoError(t, err)
Expand Down
16 changes: 15 additions & 1 deletion balloon/hyper/tree.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@
limitations under the License.
*/

// Package hyper implements the available operations in the hyper-tree (sparse merkel tree).
// Package hyper implements the hyper tree (a sparse Merkle tree)
// life cycle and its operations, as well as
// the functionality to request and verify a membership proof.
package hyper

import (
Expand Down Expand Up @@ -73,6 +75,9 @@ func NewHyperTree(hasherF func() hashing.Hasher, store storage.Store, cache cach
return tree
}

// Add adds an event digest into the hyper tree.
// It builds a stack of operations and then interprets it to calculate the expected
// root hash, which it returns along with the storage mutations to be done at balloon level.
func (t *HyperTree) Add(eventDigest hashing.Digest, version uint64) (hashing.Digest, []*storage.Mutation, error) {
t.Lock()
defer t.Unlock()
Expand All @@ -96,6 +101,9 @@ func (t *HyperTree) Add(eventDigest hashing.Digest, version uint64) (hashing.Dig
return rh, ctx.Mutations, nil
}

// AddBulk adds a bulk of event digests into the hyper tree.
// It builds a stack of operations and then interprets it to calculate the expected
// root hash, which it returns along with the storage mutations to be done at balloon level.
func (t *HyperTree) AddBulk(eventDigests []hashing.Digest, initialVersion uint64) (hashing.Digest, []*storage.Mutation, error) {
t.Lock()
defer t.Unlock()
Expand All @@ -122,6 +130,9 @@ func (t *HyperTree) AddBulk(eventDigests []hashing.Digest, initialVersion uint64
return rh, ctx.Mutations, nil
}

// QueryMembership builds the membership proof of the given event digest.
// It builds a stack of operations and then interprets it to generate and return the
// audit path.
func (t *HyperTree) QueryMembership(eventDigest hashing.Digest) (proof *QueryProof, err error) {
t.Lock()
defer t.Unlock()
Expand All @@ -144,6 +155,8 @@ func (t *HyperTree) QueryMembership(eventDigest hashing.Digest) (proof *QueryPro
return NewQueryProof(eventDigest, ctx.Value, ctx.AuditPath, t.hasherF()), nil
}

// RebuildCache reads the hyper-cache RocksDB table to recreate indexes and cache.
// It builds a stack of operations and then interprets it to rebuild the cache.
func (t *HyperTree) RebuildCache() {
t.Lock()
defer t.Unlock()
Expand Down Expand Up @@ -183,6 +196,7 @@ func (t *HyperTree) RebuildCache() {

}

// Close releases the hyper tree's internal references so it can no longer be used.
func (t *HyperTree) Close() {
t.Lock()
defer t.Unlock()
Expand Down
58 changes: 29 additions & 29 deletions storage/bplus/bplus_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,14 +34,6 @@ func NewBPlusTreeStore() *BPlusTreeStore {
return &BPlusTreeStore{btree.New(2)}
}

// KVItem is a key/value pair stored in the underlying B+ tree.
type KVItem struct {
Key, Value []byte
}

// Less reports whether p sorts before b, comparing keys lexicographically.
// It satisfies the btree.Item interface.
// NOTE(review): the type assertion b.(KVItem) panics if the tree ever holds a
// different item type — confirm only KVItems are inserted.
func (p KVItem) Less(b btree.Item) bool {
return bytes.Compare(p.Key, b.(KVItem).Key) < 0
}

func (s *BPlusTreeStore) Mutate(mutations []*storage.Mutation) error {
for _, m := range mutations {
key := append([]byte{m.Table.Prefix()}, m.Key...)
Expand Down Expand Up @@ -96,6 +88,35 @@ func (s BPlusTreeStore) GetAll(table storage.Table) storage.KVPairReader {
return NewBPlusKVPairReader(table, s.db)
}

// Close clears the underlying B+ tree and always reports success.
// Clear(false) discards all items without returning freed nodes to the freelist.
func (s BPlusTreeStore) Close() error {
s.db.Clear(false)
return nil
}

// Backup is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Backup(w io.Writer, id uint64) error {
panic("Not implemented")
}

// Load is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Load(r io.Reader) error {
panic("Not implemented")
}

// Snapshot is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Snapshot() (uint64, error) {
panic("Not implemented")
}

// RegisterMetrics is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) RegisterMetrics(registry metrics.Registry) {
panic("Not implemented")
}

// KVItem is a key/value pair stored in the underlying B+ tree.
type KVItem struct {
Key, Value []byte
}

// Less reports whether p sorts before b, comparing keys lexicographically.
// It satisfies the btree.Item interface.
// NOTE(review): the type assertion b.(KVItem) panics if the tree ever holds a
// different item type — confirm only KVItems are inserted.
func (p KVItem) Less(b btree.Item) bool {
return bytes.Compare(p.Key, b.(KVItem).Key) < 0
}

type BPlusKVPairReader struct {
prefix byte
db *btree.BTree
Expand Down Expand Up @@ -131,24 +152,3 @@ func (r *BPlusKVPairReader) Read(buffer []*storage.KVPair) (n int, err error) {
// Close releases the reader's reference to the underlying B+ tree so it can
// be garbage collected; the reader must not be used afterwards.
func (r *BPlusKVPairReader) Close() {
r.db = nil
}

// Close clears the underlying B+ tree and always reports success.
// Clear(false) discards all items without returning freed nodes to the freelist.
func (s BPlusTreeStore) Close() error {
s.db.Clear(false)
return nil
}

// Backup is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Backup(w io.Writer, id uint64) error {
panic("Not implemented")
}

// Load is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Load(r io.Reader) error {
panic("Not implemented")
}

// Snapshot is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) Snapshot() (uint64, error) {
panic("Not implemented")
}

// RegisterMetrics is not supported by the in-memory B+ tree store and panics if called.
func (s BPlusTreeStore) RegisterMetrics(registry metrics.Registry) {
panic("Not implemented")
}
64 changes: 32 additions & 32 deletions storage/rocks/rocksdb_store.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,38 +377,6 @@ func (s *RocksDBStore) GetLast(table storage.Table) (*storage.KVPair, error) {
return nil, storage.ErrKeyNotFound
}

// RocksDBKVPairReader iterates over the key/value pairs of a single RocksDB
// column family.
type RocksDBKVPairReader struct {
it *rocksdb.Iterator
}

// NewRocksDBKVPairReader creates a reader positioned at the first entry of
// the given column family. Fill-cache is disabled so that a full scan does
// not evict hot data from the block cache.
func NewRocksDBKVPairReader(cfHandle *rocksdb.ColumnFamilyHandle, db *rocksdb.DB) *RocksDBKVPairReader {
opts := rocksdb.NewDefaultReadOptions()
opts.SetFillCache(false)
it := db.NewIteratorCF(opts, cfHandle)
it.SeekToFirst()
return &RocksDBKVPairReader{it}
}

// Read fills buffer with key/value pairs from the iterator, advancing it, and
// returns the number of pairs written; n < len(buffer) means the iterator is
// exhausted. Keys and values are copied because the iterator's slices are only
// valid until Free is called.
// NOTE(review): err is never set here; iterator errors (it.Err()) are not
// surfaced — confirm whether callers need them.
func (r *RocksDBKVPairReader) Read(buffer []*storage.KVPair) (n int, err error) {
for n = 0; r.it.Valid() && n < len(buffer); r.it.Next() {
keySlice := r.it.Key()
valueSlice := r.it.Value()
// Copy out of the rocksdb-owned slices before freeing them.
key := make([]byte, keySlice.Size())
value := make([]byte, valueSlice.Size())
copy(key, keySlice.Data())
copy(value, valueSlice.Data())
keySlice.Free()
valueSlice.Free()
buffer[n] = &storage.KVPair{Key: key, Value: value}
n++
}
return n, err
}

// Close releases the underlying RocksDB iterator.
func (r *RocksDBKVPairReader) Close() {
r.it.Close()
}

// GetAll returns a reader over every key/value pair stored in the given
// table's column family.
func (s *RocksDBStore) GetAll(table storage.Table) storage.KVPairReader {
return NewRocksDBKVPairReader(s.cfHandles[table], s.db)
}
Expand Down Expand Up @@ -600,3 +568,35 @@ func writeTo(entry *pb.KVPair, w io.Writer) error {
_, err = w.Write(buf)
return err
}

// RocksDBKVPairReader iterates over the key/value pairs of a single RocksDB
// column family.
type RocksDBKVPairReader struct {
it *rocksdb.Iterator
}

// NewRocksDBKVPairReader creates a reader positioned at the first entry of
// the given column family. Fill-cache is disabled so that a full scan does
// not evict hot data from the block cache.
func NewRocksDBKVPairReader(cfHandle *rocksdb.ColumnFamilyHandle, db *rocksdb.DB) *RocksDBKVPairReader {
opts := rocksdb.NewDefaultReadOptions()
opts.SetFillCache(false)
it := db.NewIteratorCF(opts, cfHandle)
it.SeekToFirst()
return &RocksDBKVPairReader{it}
}

// Read fills buffer with key/value pairs from the iterator, advancing it, and
// returns the number of pairs written; n < len(buffer) means the iterator is
// exhausted. Keys and values are copied because the iterator's slices are only
// valid until Free is called.
// NOTE(review): err is never set here; iterator errors (it.Err()) are not
// surfaced — confirm whether callers need them.
func (r *RocksDBKVPairReader) Read(buffer []*storage.KVPair) (n int, err error) {
for n = 0; r.it.Valid() && n < len(buffer); r.it.Next() {
keySlice := r.it.Key()
valueSlice := r.it.Value()
// Copy out of the rocksdb-owned slices before freeing them.
key := make([]byte, keySlice.Size())
value := make([]byte, valueSlice.Size())
copy(key, keySlice.Data())
copy(value, valueSlice.Data())
keySlice.Free()
valueSlice.Free()
buffer[n] = &storage.KVPair{Key: key, Value: value}
n++
}
return n, err
}

// Close releases the underlying RocksDB iterator.
func (r *RocksDBKVPairReader) Close() {
r.it.Close()
}

0 comments on commit 28bc133

Please sign in to comment.