-
Notifications
You must be signed in to change notification settings - Fork 48
/
cache.go
155 lines (132 loc) · 3.57 KB
/
cache.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
package promapi
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// cacheEntry is a single cached query result stored inside queryCache.
type cacheEntry struct {
	data any // cached response payload; concrete type depends on the endpoint that produced it
	expiresAt time.Time // absolute TTL deadline; zero value means no TTL expiry for this entry
	lastGet time.Time // time of the most recent get() for this entry; used by gc() for maxStale eviction
}
// endpointStats holds cache hit/miss counters for a single API endpoint.
// Counters are guarded by queryCache.mu, not by this struct itself.
type endpointStats struct {
	hits int // number of get() calls that found an entry
	misses int // number of get() calls that found no entry
}
// hit records one more cache hit for this endpoint.
func (s *endpointStats) hit() {
	s.hits++
}
// miss records one more cache miss for this endpoint.
func (s *endpointStats) miss() {
	s.misses++
}
// newQueryCache returns an empty queryCache whose entries are evicted by gc()
// once they have not been read for at least maxStale.
func newQueryCache(maxStale time.Duration) *queryCache {
	qc := queryCache{
		entries:  make(map[uint64]*cacheEntry),
		stats:    make(map[string]*endpointStats),
		maxStale: maxStale,
	}
	return &qc
}
// queryCache is a mutex-guarded in-memory cache of query results keyed by a
// uint64 hash, with per-endpoint hit/miss accounting.
type queryCache struct {
	entries map[uint64]*cacheEntry // cached results keyed by query hash
	stats map[string]*endpointStats // hit/miss counters per endpoint, created lazily
	maxStale time.Duration // entries unread for this long are evicted by gc()
	evictions int // running total of entries removed by gc(); exported via cacheCollector
	mu sync.Mutex // guards every field above, including counters inside stats
}
// endpointStats returns the counters for the given endpoint, lazily creating
// them on first use. Callers must already hold c.mu.
func (c *queryCache) endpointStats(endpoint string) *endpointStats {
	if existing, ok := c.stats[endpoint]; ok {
		return existing
	}
	created := &endpointStats{}
	c.stats[endpoint] = created
	return created
}
// get looks up key in the cache, recording a hit or a miss for endpoint.
// On a hit it refreshes the entry's lastGet timestamp (protecting it from
// maxStale eviction) and returns the stored value.
func (c *queryCache) get(key uint64, endpoint string) (any, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	entry, found := c.entries[key]
	if !found {
		c.endpointStats(endpoint).miss()
		return nil, false
	}

	entry.lastGet = time.Now()
	c.endpointStats(endpoint).hit()
	return entry.data, true
}
// set stores val under key, unconditionally overwriting any existing entry.
// The entry's lastGet is initialised to the current time so a freshly stored
// entry is not immediately evicted by gc()'s maxStale check. A ttl > 0 also
// sets an absolute expiry deadline; ttl <= 0 means the entry only ever
// expires via staleness.
func (c *queryCache) set(key uint64, val any, ttl time.Duration) {
	c.mu.Lock()
	defer c.mu.Unlock()
	// Take a single timestamp so lastGet and expiresAt are consistent with
	// each other (the original called time.Now() twice).
	now := time.Now()
	ce := &cacheEntry{
		data:    val,
		lastGet: now,
	}
	if ttl > 0 {
		ce.expiresAt = now.Add(ttl)
	}
	c.entries[key] = ce
}
// gc drops every entry that has either passed its expiresAt deadline or has
// not been read for at least maxStale, counting each removal in c.evictions.
// Surviving entries are copied into a fresh map that replaces c.entries.
func (c *queryCache) gc() {
	c.mu.Lock()
	defer c.mu.Unlock()

	now := time.Now()
	kept := make(map[uint64]*cacheEntry)
	for key, entry := range c.entries {
		expired := !entry.expiresAt.IsZero() && entry.expiresAt.Before(now)
		stale := now.Sub(entry.lastGet) >= c.maxStale
		if expired || stale {
			c.evictions++
			continue
		}
		kept[key] = entry
	}
	c.entries = kept
}
// cacheCollector implements prometheus.Collector (Describe/Collect below),
// exposing size, hit, miss and eviction metrics for one queryCache.
type cacheCollector struct {
	cache *queryCache // the cache whose internal state is reported
	entries *prometheus.Desc // gauge: current number of cached entries
	hits *prometheus.Desc // counter: cache hits, labelled by endpoint
	misses *prometheus.Desc // counter: cache misses, labelled by endpoint
	evictions *prometheus.Desc // counter: entries evicted by gc()
}
// newCacheCollector builds a prometheus collector for the given cache.
// Every metric carries a constant "name" label identifying which cache the
// values belong to.
func newCacheCollector(cache *queryCache, name string) *cacheCollector {
	// All four descriptors share the same constant label set.
	constLabels := prometheus.Labels{"name": name}
	perEndpoint := []string{"endpoint"}
	return &cacheCollector{
		cache: cache,
		entries: prometheus.NewDesc(
			"pint_prometheus_cache_size",
			"Total number of entries currently stored in Prometheus query cache",
			nil,
			constLabels,
		),
		hits: prometheus.NewDesc(
			"pint_prometheus_cache_hits_total",
			"Total number of query cache hits",
			perEndpoint,
			constLabels,
		),
		misses: prometheus.NewDesc(
			"pint_prometheus_cache_miss_total",
			"Total number of query cache misses",
			perEndpoint,
			constLabels,
		),
		evictions: prometheus.NewDesc(
			"pint_prometheus_cache_evictions_total",
			"Total number of times an entry was evicted from query cache due to size limit or TTL",
			nil,
			constLabels,
		),
	}
}
// Describe implements prometheus.Collector by sending every metric
// descriptor this collector can produce.
func (c *cacheCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, desc := range []*prometheus.Desc{c.entries, c.hits, c.misses, c.evictions} {
		ch <- desc
	}
}
// Collect implements prometheus.Collector. It holds the cache mutex for the
// duration of the scrape so the reported values are mutually consistent even
// while the cache is in concurrent use.
func (c *cacheCollector) Collect(ch chan<- prometheus.Metric) {
	c.cache.mu.Lock()
	defer c.cache.mu.Unlock()

	size := float64(len(c.cache.entries))
	ch <- prometheus.MustNewConstMetric(c.entries, prometheus.GaugeValue, size)
	for endpoint, counters := range c.cache.stats {
		ch <- prometheus.MustNewConstMetric(c.hits, prometheus.CounterValue, float64(counters.hits), endpoint)
		ch <- prometheus.MustNewConstMetric(c.misses, prometheus.CounterValue, float64(counters.misses), endpoint)
	}
	ch <- prometheus.MustNewConstMetric(c.evictions, prometheus.CounterValue, float64(c.cache.evictions))
}