cache.go
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package lrucache

import (
	"container/list"
	"sync"
	"time"
)

// Options controls the details of the expiration policy.
type Options struct {
	// Expiration is how long an entry will be valid. It is not
	// affected by LRU or anything: after this duration, the object
	// is invalidated. A non-positive value means no expiration.
	Expiration time.Duration

	// Capacity is how many objects to keep in memory.
	Capacity int
}

// cacheState contains all of the state for a cached entry.
type cacheState struct {
	once  sync.Once
	when  time.Time
	order *list.Element
	value interface{}
}

// ExpiringLRU caches values for string keys with a time based expiration and
// an LRU based eviction policy.
type ExpiringLRU struct {
	mu    sync.Mutex
	opts  Options
	data  map[string]*cacheState
	order *list.List
}

// New constructs an ExpiringLRU with the given options.
func New(opts Options) *ExpiringLRU {
	return &ExpiringLRU{
		opts:  opts,
		data:  make(map[string]*cacheState, opts.Capacity),
		order: list.New(),
	}
}

// Get returns the value for some key if it exists and is valid. If not
// it will call the provided function. Concurrent calls will dedupe as
// best as they are able. If the function returns an error, it is not
// cached and further calls will try again.
func (e *ExpiringLRU) Get(key string, fn func() (interface{}, error)) (
	value interface{}, err error) {
	if e.opts.Capacity <= 0 {
		return fn()
	}

	for {
		e.mu.Lock()
		state, ok := e.data[key]
		switch {
		case !ok:
			// Evict least recently used entries until there is room
			// for the new entry.
			for len(e.data) >= e.opts.Capacity {
				back := e.order.Back()
				delete(e.data, back.Value.(string))
				e.order.Remove(back)
			}
			state = &cacheState{
				when:  time.Now(),
				order: e.order.PushFront(key),
			}
			e.data[key] = state

		case e.opts.Expiration > 0 && time.Since(state.when) > e.opts.Expiration:
			// The entry has expired: remove it and retry the lookup.
			delete(e.data, key)
			e.order.Remove(state.order)
			e.mu.Unlock()
			continue

		default:
			e.order.MoveToFront(state.order)
		}
		e.mu.Unlock()

		called := false
		state.once.Do(func() {
			called = true
			value, err = fn()
			if err == nil {
				// careful because we don't want a `(*T)(nil) != nil` situation
				// that's why we only assign to state.value if err == nil.
				state.value = value
			} else {
				// the once has been used. delete it so that any other waiters
				// will retry.
				e.mu.Lock()
				if e.data[key] == state {
					delete(e.data, key)
					e.order.Remove(state.order)
				}
				e.mu.Unlock()
			}
		})

		if called || state.value != nil {
			return state.value, err
		}
	}
}

// Delete explicitly removes a key from the cache if it exists.
func (e *ExpiringLRU) Delete(key string) {
	e.mu.Lock()
	defer e.mu.Unlock()

	state, ok := e.data[key]
	if !ok {
		return
	}

	delete(e.data, key)
	e.order.Remove(state.order)
}