forked from Workiva/go-datastructures
-
Notifications
You must be signed in to change notification settings - Fork 0
/
cacher.go
133 lines (107 loc) · 3.11 KB
/
cacher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/*
Copyright 2014 Workiva, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package btree
import (
"sync"
"time"
"github.com/Workiva/go-datastructures/futures"
)
// cacher provides a convenient construct for retrieving,
// storing, and caching nodes; basically wrapper persister with a caching layer.
// This ensures that we don't have to constantly
// run to the persister to fetch nodes we are using over and over again.
// TODO: this should probably evict items from the cache if the cache gets
// too full.
type cacher struct {
	// lock guards cache; all map reads and writes happen under it.
	lock sync.Mutex
	// cache maps string(ID) to a future that resolves to either a
	// *Node or an error value (see asyncLoadNode/getNode).
	cache map[string]*futures.Future
	// persister is the backing store nodes are loaded from on a
	// cache miss.
	persister Persister
}
// asyncLoadNode loads the node identified by key and delivers the
// outcome on completer: an error, ErrNodeNotFound when the node does
// not exist, or the loaded *Node. It is intended to run in its own
// goroutine, feeding the future created in getNode.
func (c *cacher) asyncLoadNode(t *Tr, key ID, completer chan interface{}) {
	node, loadErr := c.loadNode(t, key)
	switch {
	case loadErr != nil:
		completer <- loadErr
	case node == nil:
		completer <- ErrNodeNotFound
	default:
		completer <- node
	}
}
// clear deletes all items from the cache.
// clear drops every cached future, so subsequent lookups go back to
// the persister. Threadsafe.
func (c *cacher) clear() {
	fresh := make(map[string]*futures.Future, 10)
	c.lock.Lock()
	c.cache = fresh
	c.lock.Unlock()
}
// deleteFromCache will remove the provided ID from the cache. This
// is a threadsafe operation.
// deleteFromCache evicts the entry for the provided ID, if one
// exists. This is a threadsafe operation.
func (c *cacher) deleteFromCache(id ID) {
	key := string(id)
	c.lock.Lock()
	delete(c.cache, key)
	c.lock.Unlock()
}
// loadNode fetches the node identified by key directly from the
// persister, bypassing the cache, and deserializes it. It returns
// (nil, nil) when the persister has no payload for the key — callers
// (getNode, asyncLoadNode) treat a nil node as "not found".
func (c *cacher) loadNode(t *Tr, key ID) (*Node, error) {
	items, err := c.persister.Load(key)
	if err != nil {
		return nil, err
	}
	// Guard the items[0] access below: a persister that returns no
	// items and no error would otherwise cause an index-out-of-range
	// panic. Report it as "not found" via the nil-node convention.
	if len(items) == 0 {
		return nil, nil
	}
	n, err := nodeFromBytes(t, items[0].Payload)
	if err != nil {
		return nil, err
	}
	return n, nil
}
// getNode will return a Node matching the provided id. An error is returned
// if the cacher could not go to the persister or the node could not be found.
// All found nodes are cached so subsequent calls should be faster than
// the initial. This blocks until the node is loaded, but is also threadsafe.
func (c *cacher) getNode(t *Tr, key ID, useCache bool) (*Node, error) {
	// Bypass the cache entirely when asked; go straight to the persister.
	if !useCache {
		return c.loadNode(t, key)
	}
	c.lock.Lock()
	future, ok := c.cache[string(key)]
	if ok {
		// Another caller already started (or finished) loading this
		// node; release the lock and block on the shared future.
		c.lock.Unlock()
		ifc, err := future.GetResult()
		if err != nil {
			// Future-level failure (presumably a timeout — TODO confirm
			// against the futures package semantics).
			return nil, err
		}
		return ifc.(*Node), nil
	}
	// Cache miss: publish the future into the map BEFORE unlocking so
	// that concurrent callers for the same key share this single load
	// instead of each hitting the persister.
	completer := make(chan interface{}, 1)
	future = futures.New(completer, 30*time.Second)
	c.cache[string(key)] = future
	c.lock.Unlock()
	go c.asyncLoadNode(t, key, completer)
	ifc, err := future.GetResult()
	if err != nil {
		// The future itself failed; evict the entry so a later call
		// can retry the load rather than re-observing this failure.
		c.deleteFromCache(key)
		return nil, err
	}
	// asyncLoadNode delivers either a *Node or an error value on the
	// completer channel; an error value also evicts the cache entry.
	if err, ok := ifc.(error); ok {
		c.deleteFromCache(key)
		return nil, err
	}
	return ifc.(*Node), nil
}
// newCacher is the constructor for a cacher that caches nodes for
// an indefinite period of time.
// newCacher is the constructor for a cacher backed by the given
// persister; cached nodes are retained indefinitely.
func newCacher(persister Persister) *cacher {
	c := &cacher{persister: persister}
	c.cache = make(map[string]*futures.Future, 10)
	return c
}