// full_self_cacher.go
package libkb
import (
"fmt"
"sync"
"time"
keybase1 "github.com/keybase/client/go/protocol/keybase1"
context "golang.org/x/net/context"
)
// FullSelfer is an interface for accessing a fully-loaded *User object for
// the current ("self") user, with or without caching. Implementations hand
// the user to a closure rather than returning it, so the object cannot
// escape whatever locking discipline the implementation enforces.
type FullSelfer interface {
	// WithSelf loads the self user and passes it to f.
	WithSelf(ctx context.Context, f func(u *User) error) error
	// WithSelfForcePoll is like WithSelf, but always checks the server for freshness.
	WithSelfForcePoll(ctx context.Context, f func(u *User) error) error
	// WithUser loads the user described by arg (self or any other user) and passes it to f.
	WithUser(arg LoadUserArg, f func(u *User) error) (err error)
	// HandleUserChanged notifies the implementation that the given UID changed,
	// so any state cached for it may be invalidated.
	HandleUserChanged(u keybase1.UID) error
	// Update offers a freshly-loaded user object to the implementation for caching.
	Update(ctx context.Context, u *User) error
	// New returns a fresh instance of the same implementation.
	New() FullSelfer
	// OnLogin is called after a login completes so stale state can be reset.
	OnLogin(mctx MetaContext) error
}
// UncachedFullSelf is a FullSelfer that performs no caching: every access
// triggers a fresh LoadUser.
type UncachedFullSelf struct {
	Contextified
}

// Compile-time check that *UncachedFullSelf satisfies FullSelfer.
var _ FullSelfer = (*UncachedFullSelf)(nil)
// WithSelf loads the self user fresh (public keys optional) and hands it to f.
func (n *UncachedFullSelf) WithSelf(ctx context.Context, f func(u *User) error) error {
	return n.WithUser(
		NewLoadUserArg(n.G()).WithPublicKeyOptional().WithSelf(true).WithNetContext(ctx),
		f,
	)
}
// WithSelfForcePoll loads the self user fresh, always polling the server, and hands it to f.
func (n *UncachedFullSelf) WithSelfForcePoll(ctx context.Context, f func(u *User) error) error {
	return n.WithUser(
		NewLoadUserArg(n.G()).WithPublicKeyOptional().WithSelf(true).WithNetContext(ctx).WithForcePoll(true),
		f,
	)
}
// WithUser loads the user described by arg and, if that succeeds, runs f on it.
// The loaded user is discarded once f returns.
func (n *UncachedFullSelf) WithUser(arg LoadUserArg, f func(u *User) error) error {
	u, err := LoadUser(arg)
	if err == nil {
		err = f(u)
	}
	return err
}
// HandleUserChanged is a no-op: there is no cached state to invalidate.
func (n *UncachedFullSelf) HandleUserChanged(u keybase1.UID) error { return nil }
// OnLogin is a no-op: there is no cached state to reset on login.
func (n *UncachedFullSelf) OnLogin(mctx MetaContext) error { return nil }
// Update is a no-op: freshly loaded users are simply discarded.
func (n *UncachedFullSelf) Update(ctx context.Context, u *User) error { return nil }
// New returns a fresh UncachedFullSelf sharing the same GlobalContext.
func (n *UncachedFullSelf) New() FullSelfer { return NewUncachedFullSelf(n.G()) }
// NewUncachedFullSelf constructs an UncachedFullSelf bound to the given GlobalContext.
func NewUncachedFullSelf(g *GlobalContext) *UncachedFullSelf {
	return &UncachedFullSelf{NewContextified(g)}
}
// CachedFullSelf caches a full-on *User for the "me" or "self" user.
// Because it's a full-on *User, it contains many pointers and can't
// reasonably be deep-copied. So we're going to insist that access to the
// cached user is protected inside a lock.
type CachedFullSelf struct {
	Contextified
	sync.Mutex
	me       *User     // cached self user; nil when the cache is empty or invalidated
	cachedAt time.Time // when me was last stored; used for the freshness check
	// TestDeadlocker, if non-nil, is invoked just before LoadUser in WithUser —
	// presumably a test hook for provoking deadlocks; confirm against tests.
	TestDeadlocker func()
}

// Compile-time check that *CachedFullSelf satisfies FullSelfer.
var _ FullSelfer = (*CachedFullSelf)(nil)
// NewCachedFullSelf makes a new full self cacher in the given GlobalContext.
// The cache starts empty; the first WithSelf populates it.
func NewCachedFullSelf(g *GlobalContext) *CachedFullSelf {
	return &CachedFullSelf{
		Contextified: NewContextified(g),
	}
}
// New returns a fresh, empty CachedFullSelf sharing the same GlobalContext.
func (m *CachedFullSelf) New() FullSelfer { return NewCachedFullSelf(m.G()) }
// isSelfLoad reports whether arg describes a load of the currently cached
// self user — either explicitly via arg.self, or because its name or UID
// matches the cached user's. Caller must hold m.Lock with m.me non-nil.
func (m *CachedFullSelf) isSelfLoad(arg LoadUserArg) bool {
	switch {
	case arg.self:
		return true
	case arg.name != "" && NewNormalizedUsername(arg.name).Eq(m.me.GetNormalizedName()):
		return true
	case arg.uid.Exists() && arg.uid.Equal(m.me.GetUID()):
		return true
	default:
		return false
	}
}
// WithSelf loads only the self user, and maybe hits the cache.
// It takes a closure, in which the user object is locked and accessible,
// but we should be sure the user never escapes this closure. If the user
// is fresh-loaded, then it is stored in memory.
func (m *CachedFullSelf) WithSelf(ctx context.Context, f func(u *User) error) error {
	return m.WithUser(
		NewLoadUserArg(m.G()).WithPublicKeyOptional().WithSelf(true).WithNetContext(ctx),
		f,
	)
}
// WithSelfForcePoll is like WithSelf but forces a poll. I.e., it will always go to the server for
// a merkle check, regardless of when the existing self was cached.
func (m *CachedFullSelf) WithSelfForcePoll(ctx context.Context, f func(u *User) error) error {
	return m.WithUser(
		NewLoadUserArg(m.G()).WithPublicKeyOptional().WithSelf(true).WithNetContext(ctx).WithForcePoll(true),
		f,
	)
}
// maybeClearCache nils out the cached self user (m.me) if it can no longer be
// trusted to be fresh. A copy younger than CachedUserTimeout is kept as-is
// unless the caller forced a poll. Otherwise the merkle tree is consulted:
// on any error, or if the cached sigchain seqno / id version disagree with
// the merkle leaf, the cache is dropped. On a successful lookup the fetched
// sigHints and merkle leaf are stashed into arg so a subsequent LoadUser can
// reuse them. Caller must hold m.Lock and guarantee m.me != nil on entry.
func (m *CachedFullSelf) maybeClearCache(ctx context.Context, arg *LoadUserArg) {
	var err error
	m.G().Log.CDebugf(ctx, "CachedFullSelf#maybeClearCache(%+v)", arg)
	// Fast path: the cached copy is recent enough and no poll was forced.
	now := m.G().Clock().Now()
	diff := now.Sub(m.cachedAt)
	if diff < CachedUserTimeout && !arg.forcePoll {
		m.G().Log.CDebugf(ctx, "| was fresh, last loaded %s ago", diff)
		return
	}
	// Stale (or force-polled): fetch sig hints and our merkle leaf from the server.
	var sigHints *SigHints
	var leaf *MerkleUserLeaf
	sigHints, leaf, err = lookupSigHintsAndMerkleLeaf(NewMetaContext(ctx, m.G()), arg.uid, true, MerkleOpts{})
	if err != nil {
		// Can't verify freshness, so drop the cache and let the caller reload.
		m.me = nil
		m.G().Log.CDebugf(ctx, "| CachedFullSelf error querying merkle tree, will nil-out cache: %s", err)
		return
	}
	// Stash the freshly fetched data into arg so LoadUser can reuse it.
	arg.sigHints = sigHints
	arg.merkleLeaf = leaf
	var idVersion int64
	if idVersion, err = m.me.GetIDVersion(); err != nil {
		m.me = nil
		m.G().Log.CDebugf(ctx, "| CachedFullSelf: error get id version, will nil-out cache: %s", err)
		return
	}
	// Still fresh if the merkle leaf matches our cached sigchain tail and id version.
	if leaf.public != nil && leaf.public.Seqno == m.me.GetSigChainLastKnownSeqno() && leaf.idVersion == idVersion {
		m.G().Log.CDebugf(ctx, "| CachedFullSelf still fresh at seqno=%d, idVersion=%d", leaf.public.Seqno, leaf.idVersion)
		return
	}
	m.G().Log.CDebugf(ctx, "| CachedFullSelf was out of date")
	m.me = nil
}
// WithUser loads any old user. If it happens to be the self user, then it behaves
// as in WithSelf. Otherwise, it will just load the user, and throw it out when done.
// WithUser supports other so that code doesn't need to change if we're doing the
// operation for the user or someone else.
func (m *CachedFullSelf) WithUser(arg LoadUserArg, f func(u *User) error) (err error) {
	ctx := arg.GetNetContext()
	ctx = WithLogTag(ctx, "SELF")
	arg = arg.WithNetContext(ctx)
	m.G().Log.CDebugf(ctx, "+ CachedFullSelf#WithUser(%+v)", arg)
	// Hold the lock for the whole operation — including the call to f — so the
	// cached *User is never accessed concurrently.
	m.Lock()
	defer func() {
		m.G().Log.CDebugf(ctx, "- CachedFullSelf#WithUser")
		m.Unlock()
	}()
	var u *User

	if m.me != nil && m.isSelfLoad(arg) {
		// This UID might be nil. Or it could be wrong, so just overwrite it with the current
		// self that we have loaded into the full self cacher.
		arg.uid = m.me.GetUID()
		// Freshness check: may nil out m.me and/or prefill arg with merkle data.
		m.maybeClearCache(ctx, &arg)
	}

	// Cache miss (or a load of some other user): do a real LoadUser.
	if m.me == nil || !m.isSelfLoad(arg) {
		if m.TestDeadlocker != nil {
			m.TestDeadlocker()
		}
		u, err = LoadUser(arg)
		if err != nil {
			return err
		}
		// WARNING! You can't call m.G().GetMyUID() if this function is called from
		// within the Account/LoginState inner loop. Because m.G().GetMyUID() calls
		// back into Account, it will deadlock.
		if arg.self || u.GetUID().Equal(m.G().GetMyUID()) {
			m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache populate")
			m.cacheMe(u)
			// Best-effort: also push the freshly loaded self into the UPAK cache.
			if ldr := m.G().GetUPAKLoader(); ldr != nil {
				if err := ldr.PutUserToCache(ctx, u); err != nil {
					m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: continuing past error putting user to cache: %s", err)
				}
			}
		} else {
			m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: other user")
		}
	} else {
		m.G().Log.CDebugf(ctx, "| CachedFullSelf#WithUser: cache hit")
		u = m.me
	}

	return f(u)
}
// cacheMe installs u as the cached self user and records the time of caching
// for later freshness checks. Caller must hold m.Lock.
func (m *CachedFullSelf) cacheMe(u *User) {
	m.me = u
	m.cachedAt = m.G().Clock().Now()
}
// Update updates the CachedFullSelf with a User loaded from someplace else -- let's
// say the UPAK loader. We throw away objects for other users or that aren't newer than
// the one we have.
//
// CALLER BEWARE! You must only provide this function with a user you know to be "self".
// This function will not do any checking along those lines (see comment below).
func (m *CachedFullSelf) Update(ctx context.Context, u *User) (err error) {
	// NOTE(max) 20171101: We used to do this:
	//
	//  if !u.GetUID().Equal(m.G().GetMyUID()) {
	//      return
	//  }
	//
	// BUT IT IS DEADLY! The problem is that m.G().GetMyUID() calls into LoginState, but often
	// we're being called from a LoginState context, so we get a circular locking situation.
	// So the onus is on the caller to check that we're actually loading self.
	defer m.G().CTrace(ctx, fmt.Sprintf("CachedFullSelf#Update(%s)", u.GetUID()), &err)()
	m.Lock()
	defer m.Unlock()

	// Empty cache: take the new copy unconditionally.
	if m.me == nil {
		m.G().Log.CDebugf(ctx, "Updating user, since our copy was null")
		m.cacheMe(u)
		return nil
	}

	newer, err := u.IsNewerThan(m.me)
	if err != nil {
		return err
	}
	if !newer {
		m.G().Log.CDebugf(ctx, "CachedFullSelf#Update called with older user")
		return nil
	}
	m.G().Log.CDebugf(ctx, "Updating user, since we got a newer copy")
	m.cacheMe(u)
	return nil
}
// HandleUserChanged clears the cached self user if it's the UID of the self user.
func (m *CachedFullSelf) HandleUserChanged(u keybase1.UID) error {
	m.Lock()
	defer m.Unlock()
	// Only a change to the cached self user busts the cache.
	if m.me == nil || !m.me.GetUID().Equal(u) {
		m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Ignoring cache bust for UID=%s", u)
		return nil
	}
	m.G().Log.Debug("| CachedFullSelf#HandleUserChanged: Invalidating me for UID=%s", u)
	m.me = nil
	return nil
}
// OnLogin clears the cached self user if it differs from what's already cached.
func (m *CachedFullSelf) OnLogin(mctx MetaContext) error {
	m.Lock()
	defer m.Unlock()
	if m.me == nil {
		return nil
	}
	// A different UID just logged in; the cached user no longer applies.
	if !m.me.GetUID().Equal(m.G().GetMyUID()) {
		m.me = nil
	}
	return nil
}
// LoadSelfForTeamSignatures loads the self user via the global FullSelfer and
// converts it to the slimmed-down UserForSignatures form used in team sigs.
// Returns a LoginRequiredError if no self user is available.
func LoadSelfForTeamSignatures(ctx context.Context, g *GlobalContext) (ret UserForSignatures, err error) {
	convert := func(u *User) error {
		if u == nil {
			return LoginRequiredError{"no self in FullSelfCacher"}
		}
		var convErr error
		ret, convErr = u.ToUserForSignatures()
		return convErr
	}
	err = g.GetFullSelfer().WithSelf(ctx, convert)
	return ret, err
}