-
Notifications
You must be signed in to change notification settings - Fork 578
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
WIP: Added Export() to export a cache.
This is a work-in-progress for exporting the entire cache so it can be stored on disk. This layout/format isn't set in stone, nor is it terribly performant currently. Would love some feedback on how to improve the performance. I think it would be good to pipeline the data export a bit, so instead of trying to serialize the entire cache, you can pipeline it with an io.Writer and channels. Signed-off-by: Mike Lloyd <mike@reboot3times.org>
- Loading branch information
1 parent
ca9a59e
commit 93bc3d9
Showing
9 changed files
with
2,011 additions
and
79 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,82 @@ | ||
package bigcache | ||
|
||
import ( | ||
"crypto/sha256" | ||
"time" | ||
pb "github.com/allegro/bigcache/pb" | ||
qpb "github.com/allegro/bigcache/queue/pb" | ||
"fmt" | ||
) | ||
|
||
const (
	// fileVersion is the version of the exported Layout format; bump it
	// whenever the on-disk layout changes incompatibly.
	fileVersion = 1
)
|
||
// Layout is the versioned, serializable representation of an exported
// cache, suitable for encoding (e.g. to JSON) and persisting to disk.
type Layout struct {
	// Version is the layout format version this export was written with.
	Version int `json:"version"`
	// Date is when the export was produced, in UTC, RFC 3339 format.
	Date string `json:"date"`
	// DataChecksum maps a shard index to the hex-encoded SHA-256 checksum
	// of that shard's encoded bytes, so tampering/corruption can be
	// detected before re-importing.
	DataChecksum map[int64]string `json:"data_checksum"`
	// Data maps a shard index to the shard's protobuf-encoded bytes.
	// (The previous comment said "gob", but the exporter marshals the
	// pb.CacheShard protobuf message.)
	Data map[int64][]byte `json:"data"`
}
|
||
// Exports data in a standardized and easy to understand format. | ||
func (c *BigCache) export() (Layout, error) { | ||
fl := Layout{ | ||
Data: make(map[int64][]byte), | ||
DataChecksum: make(map[int64]string), | ||
} | ||
fl.Version = fileVersion | ||
hasher := sha256.New() | ||
|
||
for idx, shard := range c.shards { | ||
// prepare the shard for encoding. | ||
// todo (mxplusb): optimise this a bit. not sure how else to do that. | ||
p := pb.CacheShard{ | ||
Hashmap: shard.hashmap, | ||
Entries: &qpb.BytesQueue{ | ||
Array: shard.entries.Array, | ||
Queuecapacity: uint64(shard.entries.QueueCapacity), | ||
MaxCapacity: uint64(shard.entries.MaxCapacity), | ||
Head: uint64(shard.entries.Head), | ||
Tail: uint64(shard.entries.Tail), | ||
Count: uint64(shard.entries.Count), | ||
RightMargin: uint64(shard.entries.RightMargin), | ||
HeaderBuffer: shard.entries.HeaderBuffer, | ||
Verbose: shard.entries.Verbose, | ||
InitialCapacity: uint64(shard.entries.InitialCapacity), | ||
}, | ||
Entrybuffer: shard.entryBuffer, | ||
IsVerbose: shard.isVerbose, | ||
LifeWindow: shard.lifeWindow, | ||
Stats: &pb.Stats{ | ||
Hits: shard.stats.Hits, | ||
Misses: shard.stats.Misses, | ||
DelHits: shard.stats.DelHits, | ||
DelMisses: shard.stats.DelMisses, | ||
Collisions: shard.stats.Collisions, | ||
}, | ||
} | ||
// encode the shard. | ||
shardData, err := p.Marshal() | ||
if err != nil { | ||
return Layout{}, err | ||
} | ||
// get the hash of the encoded shard so we can validate the shard is untouched. | ||
fl.Data[int64(idx)] = shardData | ||
fl.DataChecksum[int64(idx)] = fmt.Sprintf("%x", hasher.Sum(shardData)) | ||
hasher.Reset() | ||
} | ||
// always use UTC time. | ||
fl.Date = time.Now().UTC().Format(time.RFC3339) | ||
return fl, nil | ||
} | ||
|
||
// Export serializes the entire cache into a Layout so it can be persisted
// or processed downstream. It is a thin public wrapper around export().
func (c *BigCache) Export() (Layout, error) {
	return c.export()
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,63 @@ | ||
package bigcache | ||
|
||
import ( | ||
"fmt" | ||
"testing" | ||
"time" | ||
"crypto/sha256" | ||
"github.com/magiconair/properties/assert" | ||
pb "github.com/allegro/bigcache/pb" | ||
qpb "github.com/allegro/bigcache/queue/pb" | ||
) | ||
|
||
func TestBigCacheExport(t *testing.T) { | ||
cache, _ := NewBigCache(DefaultConfig(5 * time.Second)) | ||
hasher := sha256.New() | ||
|
||
firstShard := pb.CacheShard{ | ||
Hashmap: cache.shards[0].hashmap, | ||
Entries: &qpb.BytesQueue{ | ||
Array: cache.shards[0].entries.Array, | ||
Queuecapacity: uint64(cache.shards[0].entries.QueueCapacity), | ||
MaxCapacity: uint64(cache.shards[0].entries.MaxCapacity), | ||
Head: uint64(cache.shards[0].entries.Head), | ||
Tail: uint64(cache.shards[0].entries.Tail), | ||
Count: uint64(cache.shards[0].entries.Count), | ||
RightMargin: uint64(cache.shards[0].entries.RightMargin), | ||
HeaderBuffer: cache.shards[0].entries.HeaderBuffer, | ||
Verbose: cache.shards[0].entries.Verbose, | ||
InitialCapacity: uint64(cache.shards[0].entries.InitialCapacity), | ||
}, | ||
Entrybuffer: cache.shards[0].entryBuffer, | ||
IsVerbose: cache.shards[0].isVerbose, | ||
LifeWindow: cache.shards[0].lifeWindow, | ||
Stats: &pb.Stats{ | ||
Hits: cache.shards[0].stats.Hits, | ||
Misses: cache.shards[0].stats.Misses, | ||
DelHits: cache.shards[0].stats.DelHits, | ||
DelMisses: cache.shards[0].stats.DelMisses, | ||
Collisions: cache.shards[0].stats.Collisions, | ||
}, | ||
} | ||
|
||
shardData, err := firstShard.Marshal() | ||
if err != nil { | ||
t.Error(err) | ||
} | ||
|
||
testData := shardData | ||
testChecksum := fmt.Sprintf("%x", hasher.Sum(shardData)) | ||
hasher.Reset() | ||
|
||
for i := 0; i < 10; i++ { | ||
cache.Set(fmt.Sprintf("%d", i), []byte("value")) | ||
} | ||
|
||
target, err := cache.Export() | ||
if err != nil { | ||
t.Error(err) | ||
} | ||
assert.Equal(t, testChecksum, target.DataChecksum[0]) | ||
assert.Equal(t, testData, target.Data[0]) | ||
} | ||
|
Oops, something went wrong.