This repository has been archived by the owner on Oct 3, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
record.go
154 lines (129 loc) · 3.17 KB
/
record.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
package recache
import (
"compress/gzip"
"crypto/sha1"
"encoding/json"
"io"
"time"
)
// Describes record location in a cache: the frontend that owns the record
// and the key it is stored under.
type recordLocation struct {
	frontend uint
	key      Key
}
// Describes record location across all caches by extending recordLocation
// with the ID of the cache that holds the record.
type intercacheRecordLocation struct {
	cache uint
	recordLocation
}
// Bookkeeping wrapper around a record. Kept separate from the record itself
// to localize locking regions: this metadata is guarded by the cache's lock,
// while the contained record has its own synchronization.
type recordWithMeta struct {
	// Memory used by the record, not counting any contained references or
	// storage infrastructure metadata.
	memoryUsed int

	// Time of most recent use of record
	lastUsed time.Time

	// Keep pointer to node in LRU list, so we can modify the list without
	// iterating it to find this record's node.
	node *node

	// Records that include this record and should be evicted on this record's
	// eviction
	includedIn []intercacheRecordLocation

	// The record itself. Has a separate lock and can be modified without the
	// lock on the cache mutex held.
	//
	// Record must be a pointer, because it contains mutexes.
	rec *record
}
// Data storage unit in the cache. Linked to a single Key on a Frontend.
type record struct {
	// NOTE(review): presumably gates concurrent readers during initial data
	// population — confirm against the semaphore implementation.
	semaphore semaphore

	// Contained data and its SHA1 hash
	data componentNode
	hash [sha1.Size]byte
	eTag string // generated from hash

	// Error that occurred during initial data population. This will also be
	// returned on any readers that are concurrent with population.
	// Might cause error duplication, but better than returning nothing on
	// concurrent reads.
	populationError error
}
// Linked list node for storing components. This is optimal, as most of the time
// a record will only have one component.
type componentNode struct {
	component                // payload held by this node
	next      *componentNode // following node; nil terminates the list
}
// WriteTo writes the record's entire contents — every component, in list
// order — to w. Implements io.WriterTo.
//
// Per the io.WriterTo contract, n reports the total number of bytes written,
// including any bytes a component managed to write before failing. (The
// previous version dropped the partial count of a failed component.)
func (r *record) WriteTo(w io.Writer) (n int64, err error) {
	var m int64
	for c := &r.data; c != nil; c = c.next {
		m, err = c.WriteTo(w)
		// Count partial writes even on error, as required by io.WriterTo
		n += m
		if err != nil {
			return
		}
	}
	return
}
// NewReader returns a fresh io.Reader over the record's full contents.
// Reading through it does not mutate the record, so any number of readers
// may be created and used independently.
func (r *record) NewReader() io.Reader {
	reader := &recordReader{
		record:  r,
		current: r.data.NewReader(),
		next:    r.data.next,
	}
	return reader
}
// Adapter for reading data from record w/o mutating it.
// current reads from the component currently being consumed; next points to
// the first component that has not been started yet (nil when none remain).
type recordReader struct {
	current io.Reader
	next    *componentNode
	*record
}
// Read implements io.Reader over the record's component list, draining one
// component at a time and returning io.EOF only after the last one is
// consumed.
func (r *recordReader) Read(p []byte) (n int, err error) {
	// Nothing requested — report nothing done
	if len(p) == 0 {
		return 0, nil
	}

	// No active component reader: start the next component, or signal the
	// end of the record if there are no more.
	if r.current == nil {
		if r.next == nil {
			return 0, io.EOF
		}
		r.current, r.next = r.next.NewReader(), r.next.next
	}

	n, err = r.current.Read(p)
	if err == io.EOF {
		// Current component fully consumed. Mask the EOF so the caller
		// retries and we advance to the following component on the next call.
		r.current = nil
		err = nil
	}
	return n, err
}
// Adapter, that enables decoding the record's gzipped contents as JSON and
// exposes the record's hash and ETag.
type recordDecoder struct {
	*record
}
// DecodeJSON decompresses the record's contents and decodes the resulting
// JSON into dst.
func (r recordDecoder) DecodeJSON(dst interface{}) (err error) {
	body := r.Unzip()
	defer body.Close()
	err = json.NewDecoder(body).Decode(dst)
	return
}
// Unzip returns a reader over the decompressed contents of the record.
// If constructing the gzip reader fails, the error is carried inside the
// returned recordUnzipper and surfaced to its callers instead.
func (r recordDecoder) Unzip() io.ReadCloser {
	zr, err := gzip.NewReader(r.NewReader())
	return recordUnzipper{
		Reader: zr,
		error:  err,
	}
}
// SHA1 returns the SHA1 hash of the record's contents.
func (r recordDecoder) SHA1() [sha1.Size]byte {
	return r.hash
}
// ETag returns the ETag generated from the record's hash.
func (r recordDecoder) ETag() string {
	return r.record.eTag
}
// Adapter for smoother error handling
type recordUnzipper struct {
*gzip.Reader
error
}
func (r recordUnzipper) Read(p []byte) (n int, err error) {
if r.error != nil {
return 0, r.error
}
return r.Reader.Read(p)
}