cache: re-add deleted pkg

tsileo committed Feb 25, 2019
1 parent 43b839c commit cf3c2d4e46ec2f836032107edd0ea363919c73c7
Showing with 303 additions and 1 deletion.
  1. +1 −1 extras/cross_compile.sh
  2. +160 −0 pkg/cache/cache.go
  3. +142 −0 pkg/cache/cache_test.go
@@ -13,7 +13,7 @@ echo '
exename="blobstash-${os}-${arch}"
[ "$os" != "windows" ] || exename="${exename}.exe"
echo "Building $exename"
-env GOOS=$os GOARCH=$arch /usr/local/go/bin/go build -mod=vendor -o "releases/$exename" || {
+env GOOS=$os GOARCH=$arch go build -mod=vendor -o "releases/$exename" || {
echo "FAILED FOR $os $arch" >&2
continue
}
@@ -0,0 +1,160 @@
// Package cache implements a disk-backed LRU cache for "big" binary blobs
package cache // import "a4.io/blobstash/pkg/cache"

import (
"container/list"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
)

// Cache holds a cache instance, backed by a directory of files (one per key) and an in-memory eviction list
type Cache struct {
evict *list.List
items map[string]*list.Element
maxSize int64
currentSize int64
path string
}

type element struct {
key string
size int64
lastAccess int64
}

// New initializes a new LRU cache
func New(dir, name string, maxSize int64) (*Cache, error) {
if maxSize <= 0 {
return nil, fmt.Errorf("maxSize should be greater than 0")
}
path := filepath.Join(dir, name)

if _, err := os.Stat(path); os.IsNotExist(err) {
if err := os.MkdirAll(path, 0700); err != nil {
return nil, err
}
}
cache := &Cache{
maxSize: maxSize,
evict: list.New(),
items: map[string]*list.Element{},
path: path,
}
files, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
elements := []*element{}

for _, file := range files {
e := &element{
key: file.Name(),
lastAccess: int64(file.ModTime().Unix()),
size: file.Size(),
}

elements = append(elements, e)
}
sort.Slice(elements, func(i, j int) bool { return elements[i].lastAccess < elements[j].lastAccess })
for _, e := range elements {
entry := cache.evict.PushFront(e)
cache.items[e.key] = entry
cache.currentSize += e.size
}

return cache, nil
}

// Close closes the cache; it is currently a no-op since every entry is already persisted on disk
func (c *Cache) Close() error {
return nil
}

// Stat returns true if the key is stored
func (c *Cache) Stat(key string) (bool, error) {
if _, err := os.Stat(filepath.Join(c.path, key)); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}

// Get returns the value for the given key if present, marking it as recently used
func (c *Cache) Get(key string) ([]byte, bool, error) {
if elm, ok := c.items[key]; ok {
c.evict.MoveToFront(elm)
data, err := c.dbGet(key)
if err != nil || data == nil {
return nil, false, err
}
return data, true, nil
}
return nil, false, nil
}

func (c *Cache) dbDelete(key string) error {
return os.Remove(filepath.Join(c.path, key))
}

func (c *Cache) dbGet(key string) ([]byte, error) {
dat, err := ioutil.ReadFile(filepath.Join(c.path, key))
if err != nil {
return nil, err
}

return dat, nil
}

// Add adds/updates the given key/value pair
func (c *Cache) Add(key string, value []byte) error {
lastAccess := time.Now().UnixNano()
// Check for existing item
size := int64(len(value))
if elm, ok := c.items[key]; ok {
c.evict.MoveToFront(elm)
c.currentSize += size - elm.Value.(*element).size
elm.Value.(*element).size = size
elm.Value.(*element).lastAccess = lastAccess
// persist the new value so a later Get returns the updated data
if err := ioutil.WriteFile(filepath.Join(c.path, key), value, 0600); err != nil {
return err
}
return c.doEviction()
}

// Add new item
elm := &element{key, size, lastAccess}
entry := c.evict.PushFront(elm)
c.items[key] = entry
c.currentSize += size
if err := ioutil.WriteFile(filepath.Join(c.path, key), value, 0600); err != nil {
return err
}

return c.doEviction()
}

func (c *Cache) doEviction() error {
for c.currentSize > c.maxSize {
elm := c.evict.Back()
if elm != nil {
entry := elm.Value.(*element)
if err := c.dbDelete(entry.key); err != nil {
return err
}
c.currentSize -= entry.size
c.evict.Remove(elm)
delete(c.items, entry.key)
}
}
return nil
}

// Len returns the number of items stored
func (c *Cache) Len() int {
return len(c.items)
}

// Size returns the total disk usage of the cache
func (c *Cache) Size() int64 {
return c.currentSize
}
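
For reference, a minimal usage sketch of the API added above; the cache directory, cache name, key and size limit below are illustrative values, not taken from the commit:

package main

import (
	"fmt"
	"log"

	"a4.io/blobstash/pkg/cache"
)

func main() {
	// Open (or create) a 10 MB disk-backed LRU cache in ./blobs.cache
	c, err := cache.New(".", "blobs.cache", 10<<20)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Store a blob, then read it back (Get also marks the key as recently used)
	if err := c.Add("deadbeef", []byte("blob data")); err != nil {
		log.Fatal(err)
	}
	data, ok, err := c.Get("deadbeef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found=%v value=%q items=%d bytes=%d\n", ok, data, c.Len(), c.Size())
}
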
@@ -0,0 +1,142 @@
package cache

import (
"bytes"
"crypto/rand"
"fmt"
"os"
"testing"
"time"
)

func check(e error) {
if e != nil {
panic(e)
}
}

var c = "."

func TestCacheFileStorage(t *testing.T) {
cache, err := New(c, "test.cache", 1000000)
check(err)
defer func() {
os.RemoveAll("test.cache")
}()

t.Logf("cache=%v", cache)

for i := 0; i < 50; i++ {
key := fmt.Sprintf("%d-ok", i)
val := make([]byte, 500000)
if _, err := rand.Reader.Read(val[:]); err != nil {
panic(err)
}

check(cache.Add(key, val))
val2, err := cache.dbGet(key)
check(err)

if !bytes.Equal(val, val2) {
t.Errorf("big val error (%d/%d)", len(val), len(val2))
}
}
}

func TestCacheBasic(t *testing.T) {
cache, err := New(c, "test.cache", 1000000)
check(err)
defer func() {
os.RemoveAll("test.cache")
}()

t.Logf("cache=%v", cache)

val := []byte("value")
cache.Add("key2", val)

val2, ok, err := cache.Get("key2")
check(err)
if !ok {
t.Errorf("key should exist")
}
if !bytes.Equal(val, val2) {
t.Errorf("failed to retrieve data (%s/%s)", val, val2)
}

_, ok, err = cache.Get("key")
check(err)
if ok {
t.Errorf("key \"key\" should not exist")
}
}

func TestCacheLRU(t *testing.T) {
maxSize := int64(1000000)
cache, err := New(c, "test.cache", maxSize)
check(err)
defer func() {
os.RemoveAll("test.cache")
}()

t.Logf("cache=%v", cache)

kvs := map[string][]byte{}
for i := 0; i < 20; i++ {
key := fmt.Sprintf("ok-%d", i)
val := make([]byte, (maxSize/10)-1)
if _, err := rand.Reader.Read(val[:]); err != nil {
panic(err)
}
cache.Add(key, val)
if i > 9 {
kvs[key] = val
}
}

if cache.currentSize > cache.maxSize {
t.Errorf("should not exceed max size")
}

if len(cache.items) != 10 || len(kvs) != 10 {
t.Errorf("should not contain more than 10 items")
}

for i := 0; i < 10; i++ {
_, ok, err := cache.Get(fmt.Sprintf("ok-%d", i))
check(err)
if ok {
t.Errorf("key \"ok-%d\" should have been evicted", i)
}
}

for k, v := range kvs {
start := time.Now()
v2, _, err := cache.Get(k)
t.Logf("cache.Get %s", time.Since(start))
check(err)
if !bytes.Equal(v, v2) {
t.Errorf("key \"%s\" should be present", k)
}
}

size := cache.currentSize

cache.Close()
cache, err = New(c, "test.cache", maxSize)
check(err)

for k, v := range kvs {
start := time.Now()
v2, _, err := cache.Get(k)
t.Logf("cache.Get %s", time.Since(start))
check(err)
if !bytes.Equal(v, v2) {
t.Errorf("key \"%s\" should be present", k)
}
}

if cache.currentSize != size {
t.Errorf("size reloaded should be the same")
}
}
