-
-
Notifications
You must be signed in to change notification settings - Fork 0
/
lru.go
160 lines (142 loc) · 4.49 KB
/
lru.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
package eviction
// The Least Recently Used (LRU) eviction algorithm is a page replacement algorithm that discards the least recently used pages first.
// It works by maintaining a queue of pages in memory, with the most recently used page at the front of the queue and the least recently used page at the back.
// When a new page is added to memory and the memory is full, the page at the back of the queue (the least recently used page) is removed to make space for the new page.
// To implement the LRU eviction algorithm, a data structure called a doubly linked list is often used.
// Each page in the list is represented by a node, which contains a pointer to the previous node and a pointer to the next node.
// When a page is accessed, it is moved to the front of the list by updating the pointers of the surrounding nodes.
// This way, the page at the back of the list is always the least recently used page, and can be easily removed when necessary.
// The LRU eviction algorithm is widely used because it performs well in practice, with a low average page fault rate.
// However, it can be somewhat expensive to implement, as it requires updating the data structure every time a page is accessed.
import (
"sync"
"github.com/hyp3rd/hypercache/errors"
)
// lruCacheItem is a node in the LRU recency list: it stores one cached
// key/value pair plus the doubly-linked-list pointers used to reorder and
// unlink entries in O(1).
type lruCacheItem struct {
	Key   string        // cache key; kept on the node so eviction can delete the map entry
	Value any           // cached value
	prev  *lruCacheItem // neighbor toward the head (more recently used)
	next  *lruCacheItem // neighbor toward the tail (less recently used)
}
// LRUCacheItemmPool recycles lruCacheItem nodes so that Set/Evict/Delete
// churn does not allocate a fresh node per insertion.
//
// NOTE(review): the doubled "mm" in the name looks like a typo, but the
// identifier is exported, so renaming it would break external callers.
var LRUCacheItemmPool = sync.Pool{
	// New allocates a zero-valued node when the pool is empty.
	// `any` is used for consistency with the rest of the file.
	New: func() any {
		return &lruCacheItem{}
	},
}
// LRU is a Least Recently Used cache: entries live in a map for O(1)
// lookup and in a doubly linked list ordered by recency, with the head
// being the most recently used entry and the tail the eviction candidate.
type LRU struct {
	capacity int                      // maximum number of items in the cache
	items    map[string]*lruCacheItem // key -> list node, for O(1) access
	head     *lruCacheItem            // most recently used entry (front of the list)
	tail     *lruCacheItem            // least recently used entry (next to be evicted)
	sync.RWMutex                      // embedded lock guarding both the map and the list
}
// NewLRUAlgorithm builds an LRU cache bounded to capacity entries.
// A negative capacity yields errors.ErrInvalidCapacity.
func NewLRUAlgorithm(capacity int) (*LRU, error) {
	if capacity < 0 {
		return nil, errors.ErrInvalidCapacity
	}

	cache := &LRU{
		capacity: capacity,
		items:    make(map[string]*lruCacheItem, capacity),
	}

	return cache, nil
}
// Get retrieves the value for the given key and marks the entry as most
// recently used. The second return value is false when the key is absent.
//
// This must take the write lock, not the read lock: moveToFront mutates
// the linked list, so two concurrent Gets under RLock would race and
// could corrupt the list.
func (lru *LRU) Get(key string) (any, bool) {
	lru.Lock()
	defer lru.Unlock()

	item, ok := lru.items[key]
	if !ok {
		return nil, false
	}

	// Promote the entry to most-recently-used.
	lru.moveToFront(item)

	return item.Value, true
}
// Set sets the value for the given key in the cache, marking it most
// recently used. If the key is not already present it is added; if the
// cache is full, the least recently used entry is evicted first and its
// node recycled through LRUCacheItemmPool.
func (lru *LRU) Set(key string, value any) {
	lru.Lock()
	defer lru.Unlock()

	// Fast path: key already cached — update in place and promote it.
	if item, ok := lru.items[key]; ok {
		item.Value = value
		lru.moveToFront(item)
		return
	}

	// A zero-capacity cache stores nothing. The original code reached the
	// eviction branch with lru.tail == nil and panicked on a nil dereference.
	if lru.capacity == 0 {
		return
	}

	// Evict the least recently used entry when at capacity. ">=" guards
	// against any state where the map has somehow grown past capacity.
	if len(lru.items) >= lru.capacity {
		victim := lru.tail
		lru.removeFromList(victim)
		delete(lru.items, victim.Key)
		// Clear the node before pooling so the pool does not pin the old
		// key/value in memory, then recycle it (matches Evict).
		victim.Key = ""
		victim.Value = nil
		LRUCacheItemmPool.Put(victim)
	}

	// Reuse a pooled node for the new entry.
	item := LRUCacheItemmPool.Get().(*lruCacheItem)
	item.Key = key
	item.Value = value
	lru.items[key] = item
	lru.addToFront(item)
}
// Evict removes the least recently used item from the cache and returns
// its key. The second return value is false when the cache is empty.
//
// The node is unlinked and cleared BEFORE it is returned to the pool: the
// pool is a process-global not guarded by the LRU lock, so putting a
// still-linked node (as the original code did) lets another goroutine Get
// a node whose prev/next still point into this list.
func (lru *LRU) Evict() (string, bool) {
	lru.Lock()
	defer lru.Unlock()

	victim := lru.tail
	if victim == nil {
		return "", false
	}

	key := victim.Key
	lru.removeFromList(victim)
	delete(lru.items, key)

	// Clear before pooling so the pool does not retain the key/value.
	victim.Key = ""
	victim.Value = nil
	LRUCacheItemmPool.Put(victim)

	return key, true
}
// Delete removes the given key from the cache. It is a no-op when the key
// is absent. The unlinked node is recycled through LRUCacheItemmPool,
// matching Evict; the original implementation dropped it, so deleted
// nodes never returned to the pool.
func (lru *LRU) Delete(key string) {
	lru.Lock()
	defer lru.Unlock()

	item, ok := lru.items[key]
	if !ok {
		return
	}

	lru.removeFromList(item)
	delete(lru.items, key)

	// Clear before pooling so the pool does not retain the key/value.
	item.Key = ""
	item.Value = nil
	LRUCacheItemmPool.Put(item)
}
// Len reports how many entries the cache currently holds.
func (lru *LRU) Len() int {
	lru.RLock()
	count := len(lru.items)
	lru.RUnlock()

	return count
}
// moveToFront promotes node to the most-recently-used position by
// unlinking it and re-inserting it at the head. Callers must hold the
// write lock.
func (lru *LRU) moveToFront(node *lruCacheItem) {
	if lru.head == node {
		// Already the most recently used entry; nothing to do.
		return
	}

	lru.removeFromList(node)
	lru.addToFront(node)
}
// removeFromList unlinks node from the recency list, fixing up head/tail
// when node sits at either end, and nils its pointers so the node carries
// no stale links. Callers must hold the write lock.
func (lru *LRU) removeFromList(node *lruCacheItem) {
	prev, next := node.prev, node.next

	switch {
	case node == lru.head:
		lru.head = next
	default:
		prev.next = next
	}

	switch {
	case node == lru.tail:
		lru.tail = prev
	default:
		next.prev = prev
	}

	node.prev, node.next = nil, nil
}
// addToFront links node in as the new head of the recency list. When the
// list is empty the node becomes both head and tail. Callers must hold
// the write lock; node is expected to be unlinked.
func (lru *LRU) addToFront(node *lruCacheItem) {
	oldHead := lru.head
	lru.head = node

	if oldHead == nil {
		// Empty list: the single node is also the tail.
		lru.tail = node
		return
	}

	node.next = oldHead
	oldHead.prev = node
}