
Commit

Merge pull request #18 from lxzan/dev
v1.2.0
lxzan committed Dec 12, 2023
2 parents d585e85 + ad3b402 commit 16f20c6
Showing 8 changed files with 164 additions and 119 deletions.
30 changes: 16 additions & 14 deletions benchmark/benchmark_test.go
@@ -12,6 +12,7 @@ import (

const (
sharding = 128
capacity = 10000
benchcount = 1 << 20
)

@@ -20,8 +21,9 @@ var (

options = []memorycache.Option{
memorycache.WithBucketNum(sharding),
memorycache.WithBucketSize(benchcount/sharding/10, benchcount/sharding),
memorycache.WithSwissTable(),
memorycache.WithBucketSize(capacity/10, capacity),
memorycache.WithSwissTable(true),
memorycache.WithLRU(true),
}
)

@@ -87,9 +89,9 @@ func BenchmarkMemoryCache_SetAndGet(b *testing.B) {

func BenchmarkRistretto_Set(b *testing.B) {
var mc, _ = ristretto.NewCache(&ristretto.Config{
NumCounters: benchcount * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
NumCounters: capacity * sharding * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
})
b.RunParallel(func(pb *testing.PB) {
var i = 0
@@ -103,9 +105,9 @@ func BenchmarkRistretto_Set(b *testing.B) {

func BenchmarkRistretto_Get(b *testing.B) {
var mc, _ = ristretto.NewCache(&ristretto.Config{
NumCounters: benchcount * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
NumCounters: capacity * sharding * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
})
for i := 0; i < benchcount; i++ {
mc.SetWithTTL(benchkeys[i%benchcount], 1, 1, time.Hour)
@@ -124,9 +126,9 @@ func BenchmarkRistretto_Get(b *testing.B) {

func BenchmarkRistretto_SetAndGet(b *testing.B) {
var mc, _ = ristretto.NewCache(&ristretto.Config{
NumCounters: benchcount * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
NumCounters: capacity * sharding * 10, // number of keys to track frequency of (10M).
MaxCost: 1 << 30, // maximum cost of cache (1GB).
BufferItems: 64, // number of keys per Get buffer.
})
for i := 0; i < benchcount; i++ {
mc.SetWithTTL(benchkeys[i%benchcount], 1, 1, time.Hour)
@@ -148,7 +150,7 @@ func BenchmarkRistretto_SetAndGet(b *testing.B) {
}

func BenchmarkTheine_Set(b *testing.B) {
mc, _ := theine.NewBuilder[string, int](benchcount).Build()
mc, _ := theine.NewBuilder[string, int](sharding * capacity).Build()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
@@ -160,7 +162,7 @@ func BenchmarkTheine_Set(b *testing.B) {
}

func BenchmarkTheine_Get(b *testing.B) {
mc, _ := theine.NewBuilder[string, int](benchcount).Build()
mc, _ := theine.NewBuilder[string, int](sharding * capacity).Build()
for i := 0; i < benchcount; i++ {
mc.SetWithTTL(benchkeys[i%benchcount], 1, 1, time.Hour)
}
@@ -177,7 +179,7 @@ func BenchmarkTheine_Get(b *testing.B) {
}

func BenchmarkTheine_SetAndGet(b *testing.B) {
mc, _ := theine.NewBuilder[string, int](benchcount).Build()
mc, _ := theine.NewBuilder[string, int](sharding * capacity).Build()
for i := 0; i < benchcount; i++ {
mc.SetWithTTL(benchkeys[i%benchcount], 1, 1, time.Hour)
}
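
For orientation, here is a minimal sketch of how the updated benchmark wires these options together, assuming the module path github.com/lxzan/memorycache and the v1.2.0 option signatures shown in the hunks above (WithBucketNum, WithBucketSize, WithSwissTable(true), WithLRU(true)); the newBenchCache and demoUsage helpers are hypothetical, added only for illustration.

package benchmark

import (
	"fmt"
	"time"

	"github.com/lxzan/memorycache"
)

const (
	sharding = 128   // number of buckets
	capacity = 10000 // elements per bucket
)

// newBenchCache builds a cache sized like the benchmarks in this diff:
// 128 buckets, each with an initial size of capacity/10 and a cap of capacity.
func newBenchCache() *memorycache.MemoryCache[string, int] {
	return memorycache.New[string, int](
		memorycache.WithBucketNum(sharding),
		memorycache.WithBucketSize(capacity/10, capacity),
		memorycache.WithSwissTable(true), // swiss-table backed map
		memorycache.WithLRU(true),        // evict by recency instead of nearest expiration
	)
}

func demoUsage() {
	mc := newBenchCache()
	mc.Set("key", 1, time.Hour)
	fmt.Println(mc.Get("key")) // 1 true
}
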
61 changes: 30 additions & 31 deletions cache.go
@@ -27,7 +27,7 @@ type MemoryCache[K comparable, V any] struct {
// New 创建缓存数据库实例
// Creating a Cached Database Instance
func New[K comparable, V any](options ...Option) *MemoryCache[K, V] {
var conf = &config{TimeCacheEnabled: true}
var conf = &config{CachedTime: true, LRU: true}
options = append(options, withInitialize())
for _, fn := range options {
fn(conf)
@@ -46,10 +46,10 @@ func New[K comparable, V any](options ...Option) *MemoryCache[K, V] {

for i, _ := range mc.storage {
mc.storage[i] = &bucket[K, V]{
MaxCapacity: conf.MaxCapacity,
Map: containers.NewMap[K, *Element[K, V]](conf.InitialSize, conf.SwissTable),
Heap: newHeap[K, V](conf.InitialSize),
List: new(queue[K, V]),
conf: conf,
Map: containers.NewMap[K, *Element[K, V]](conf.BucketSize, conf.SwissTable),
Heap: newHeap[K, V](conf.BucketSize),
List: newQueue[K, V](conf.LRU),
}
}

@@ -66,11 +66,11 @@ func New[K comparable, V any](options ...Option) *MemoryCache[K, V] {
case now := <-ticker.C:
var sum = 0
for _, b := range mc.storage {
sum += b.ExpireCheck(now.UnixMilli(), conf.MaxKeysDeleted)
sum += b.Check(now.UnixMilli(), conf.DeleteLimits)
}

// 删除数量超过阈值, 缩小时间间隔
// If the number of deletions exceeds the threshold, shrink the interval
if d1 := utils.SelectValue(sum > conf.BucketNum*conf.MaxKeysDeleted*7/10, conf.MinInterval, conf.MaxInterval); d1 != d0 {
if d1 := utils.SelectValue(sum > conf.BucketNum*conf.DeleteLimits*7/10, conf.MinInterval, conf.MaxInterval); d1 != d0 {
d0 = d1
ticker.Reset(d0)
}
@@ -102,9 +102,9 @@ func New[K comparable, V any](options ...Option) *MemoryCache[K, V] {
func (c *MemoryCache[K, V]) Clear() {
for _, b := range c.storage {
b.Lock()
b.Heap = newHeap[K, V](c.conf.InitialSize)
b.Map = containers.NewMap[K, *Element[K, V]](c.conf.InitialSize, c.conf.SwissTable)
b.List = new(queue[K, V])
b.Heap = newHeap[K, V](c.conf.BucketSize)
b.Map = containers.NewMap[K, *Element[K, V]](c.conf.BucketSize, c.conf.SwissTable)
b.List = newQueue[K, V](c.conf.LRU)
b.Unlock()
}
}
@@ -123,7 +123,7 @@ func (c *MemoryCache[K, V]) getBucket(key K) *bucket[K, V] {
}

func (c *MemoryCache[K, V]) getTimestamp() int64 {
if c.conf.TimeCacheEnabled {
if c.conf.CachedTime {
return c.timestamp.Load()
}
return time.Now().UnixMilli()
@@ -168,15 +168,17 @@ func (c *MemoryCache[K, V]) SetWithCallback(key K, value V, exp time.Duration, c
var expireAt = c.getExp(exp)
ele, ok := c.fetch(b, key)
if ok {
b.UpdateAll(ele, value, expireAt, cb)
ele.Value, ele.cb = value, cb
b.UpdateTTL(ele, expireAt)
return true
}

b.Insert(key, value, expireAt, cb)
return false
}

// Get
// Get 查询缓存
// query cache
func (c *MemoryCache[K, V]) Get(key K) (v V, exist bool) {
var b = c.getBucket(key)
b.Lock()
@@ -229,7 +231,8 @@ func (c *MemoryCache[K, V]) GetOrCreateWithCallback(key K, value V, exp time.Dur
return value, false
}

// Delete
// Delete 删除缓存
// delete cache
func (c *MemoryCache[K, V]) Delete(key K) (deleted bool) {
var b = c.getBucket(key)
b.Lock()
@@ -244,7 +247,8 @@ func (c *MemoryCache[K, V]) Delete(key K) (deleted bool) {
return true
}

// Range
// Range 遍历缓存. 注意: 不要在回调函数里面操作 MemoryCache[K, V] 实例, 可能会造成死锁.
// Traverse the cache. Note: Do not manipulate MemoryCache[K, V] instances inside callback functions, as this may cause deadlocks.
func (c *MemoryCache[K, V]) Range(f func(K, V) bool) {
var now = time.Now().UnixMilli()
for _, b := range c.storage {
@@ -262,8 +266,8 @@ func (c *MemoryCache[K, V]) Range(f func(K, V) bool) {
}
}

// Len 获取当前元素数量
// Get the number of Elements
// Len 快速获取当前缓存元素数量, 不做过期检查.
// Quickly gets the current number of cached elements, without checking for expiration.
func (c *MemoryCache[K, V]) Len() int {
var num = 0
for _, b := range c.storage {
@@ -276,14 +280,14 @@ func (c *MemoryCache[K, V]) Len() int {

type bucket[K comparable, V any] struct {
sync.Mutex
MaxCapacity int
Map containers.Map[K, *Element[K, V]]
Heap *heap[K, V]
List *queue[K, V]
conf *config
Map containers.Map[K, *Element[K, V]]
Heap *heap[K, V]
List *queue[K, V]
}

// ExpireCheck 过期时间检查
func (c *bucket[K, V]) ExpireCheck(now int64, num int) int {
// Check 过期时间检查
// Check scans the bucket for expired keys
func (c *bucket[K, V]) Check(now int64, num int) int {
c.Lock()
defer c.Unlock()

@@ -302,20 +306,15 @@ func (c *bucket[K, V]) Delete(ele *Element[K, V], reason Reason) {
ele.cb(ele, reason)
}

func (c *bucket[K, V]) UpdateAll(ele *Element[K, V], value V, expireAt int64, cb CallbackFunc[*Element[K, V]]) {
ele.Value = value
ele.cb = cb
c.UpdateTTL(ele, expireAt)
}

func (c *bucket[K, V]) UpdateTTL(ele *Element[K, V], expireAt int64) {
c.Heap.UpdateTTL(ele, expireAt)
c.List.MoveToBack(ele)
}

func (c *bucket[K, V]) Insert(key K, value V, expireAt int64, cb CallbackFunc[*Element[K, V]]) {
if c.List.Len() >= c.MaxCapacity {
c.Delete(c.List.Front(), ReasonEvicted)
if c.Heap.Len() >= c.conf.BucketCap {
head := utils.SelectValue(c.conf.LRU, c.List.Front(), c.Heap.Front())
c.Delete(head, ReasonEvicted)
}

var ele = &Element[K, V]{Key: key, Value: value, ExpireAt: expireAt, cb: cb}
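
One behavioural consequence of the Insert change above: when a bucket is full, the eviction victim is now chosen by utils.SelectValue(c.conf.LRU, c.List.Front(), c.Heap.Front()), that is, the least recently used element when WithLRU(true) is set, otherwise the element closest to expiring. Below is a usage-level sketch of the LRU case, modelled on the WithBucketSize(0, 2) test in cache_test.go further down; the keys and the import path github.com/lxzan/memorycache are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"github.com/lxzan/memorycache"
)

func main() {
	// A single bucket capped at two elements, with LRU eviction enabled.
	mc := memorycache.New[string, int](
		memorycache.WithBucketNum(1),
		memorycache.WithBucketSize(0, 2),
		memorycache.WithLRU(true),
	)

	mc.Set("ming", 1, 3*time.Hour)
	mc.Set("hong", 2, 1*time.Hour)

	// Re-setting an existing key goes through bucket.UpdateTTL, which
	// moves it to the back of the LRU list (see the hunk above).
	mc.Set("ming", 1, 3*time.Hour)

	// The bucket is at capacity, so this insert evicts the list front: "hong".
	mc.Set("lang", 3, 2*time.Hour)

	_, okMing := mc.Get("ming")
	_, okHong := mc.Get("hong")
	fmt.Println(okMing, okHong) // expected: true false
}
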
22 changes: 14 additions & 8 deletions cache_test.go
@@ -28,7 +28,7 @@ func TestMemoryCache(t *testing.T) {
var db = New[string, any](
WithInterval(10*time.Millisecond, 10*time.Millisecond),
WithBucketNum(1),
WithTimeCache(false),
WithCachedTime(false),
)
db.Set("a", 1, 100*time.Millisecond)
db.Set("b", 1, 300*time.Millisecond)
@@ -45,7 +45,7 @@ func TestMemoryCache(t *testing.T) {
t.Run("", func(t *testing.T) {
var db = New[string, any](
WithInterval(10*time.Millisecond, 10*time.Millisecond),
WithTimeCache(false),
WithCachedTime(false),
)
db.Set("a", 1, 100*time.Millisecond)
db.Set("b", 1, 200*time.Millisecond)
@@ -62,7 +62,7 @@ func TestMemoryCache(t *testing.T) {
t.Run("", func(t *testing.T) {
var db = New[string, any](
WithInterval(10*time.Millisecond, 10*time.Millisecond),
WithTimeCache(false),
WithCachedTime(false),
)
db.Set("a", 1, 100*time.Millisecond)
db.Set("b", 1, 200*time.Millisecond)
@@ -80,7 +80,7 @@ func TestMemoryCache(t *testing.T) {
var mc = New[string, any](
WithInterval(10*time.Millisecond, 10*time.Millisecond),
WithBucketNum(1),
WithTimeCache(false),
WithCachedTime(false),
)
var m1 = make(map[string]int)
var m2 = make(map[string]int64)
@@ -121,9 +121,9 @@ func TestMemoryCache(t *testing.T) {
t.Run("expire", func(t *testing.T) {
var mc = New[string, any](
WithBucketNum(1),
WithMaxKeysDeleted(3),
WithDeleteLimits(3),
WithInterval(50*time.Millisecond, 100*time.Millisecond),
WithTimeCache(false),
WithCachedTime(false),
)
mc.Set("a", 1, 150*time.Millisecond)
mc.Set("b", 1, 150*time.Millisecond)
@@ -161,6 +161,7 @@ func TestMemoryCache_Set(t *testing.T) {
var mc = New[string, any](
WithBucketNum(1),
WithBucketSize(0, 2),
WithLRU(true),
)
mc.Set("ming", 1, 3*time.Hour)
mc.Set("hong", 1, 1*time.Hour)
@@ -578,7 +579,10 @@ func TestMemoryCache_Range(t *testing.T) {

func TestMemoryCache_LRU(t *testing.T) {
const count = 10000
var mc = New[string, int](WithBucketNum(1))
var mc = New[string, int](
WithBucketNum(1),
WithLRU(true),
)
var indexes []int
for i := 0; i < count; i++ {
indexes = append(indexes, i)
Expand All @@ -605,7 +609,9 @@ func TestMemoryCache_LRU(t *testing.T) {
}

func TestMemoryCache_Random(t *testing.T) {
var mc = New[string, int]()
var mc = New[string, int](
WithLRU(true),
)
const count = 10000
for i := 0; i < count; i++ {
var key = string(utils.AlphabetNumeric.Generate(3))
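
The test changes above track the option renames in v1.2.0: WithTimeCache becomes WithCachedTime, WithMaxKeysDeleted becomes WithDeleteLimits, and LRU ordering is now opt-in through WithLRU(true). A minimal sketch of the expiration sweep with the renamed options, assuming the import path github.com/lxzan/memorycache; the intervals and keys are illustrative.

package main

import (
	"fmt"
	"time"

	"github.com/lxzan/memorycache"
)

func main() {
	mc := memorycache.New[string, int](
		memorycache.WithBucketNum(1),
		memorycache.WithDeleteLimits(3),                                      // max keys removed per bucket per sweep
		memorycache.WithInterval(50*time.Millisecond, 100*time.Millisecond), // min/max sweep interval
		memorycache.WithCachedTime(false),                                    // read the real clock instead of the cached timestamp
	)

	mc.Set("a", 1, 150*time.Millisecond)
	mc.Set("b", 2, time.Hour)

	time.Sleep(300 * time.Millisecond)
	_, okA := mc.Get("a")
	_, okB := mc.Get("b")
	fmt.Println(okA, okB) // expected: false true
}
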
