/
limitconcurrency.go
127 lines (120 loc) · 3.45 KB
/
limitconcurrency.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
// Copyright © Hraban Luyat <hraban@0brg.net>
//
// License for use of this code is detailed in the LICENSE file
package lrucache
import (
	"errors"
	"sync"
)
// reqGet contains a single request for a key to a worker routine
type reqGet struct {
	// id is the cache key being requested.
	id string
	// If the key is found the value is pushed down this channel after which it
	// is closed immediately. If the value is not found, OnMiss is called. If
	// that does not work (OnMiss is not defined, or it returns nil) the
	// error is set to ErrNotFound. Otherwise the result is set to whatever
	// OnMiss returned. One way or another, exactly one value is pushed down
	// this channel, after which it is closed.
	reply chan<- replyGet
}
// replyGet contains all data to reply to a Get request: the resulting value
// and the error, mirroring the two return values of an OnMissHandler call.
type replyGet struct {
	val Cacheable
	err error
}
// Process operations concurrently except for those with an identical key.
//
// nocondupesMainloop is the worker loop behind NoConcurrentDupes: it receives
// Get requests on opchan and guarantees at most one in-flight call to f per
// key. Requests for a key already being computed subscribe to the pending
// result instead of triggering a second call. The loop terminates after
// opchan is closed and every outstanding call to f has completed.
func nocondupesMainloop(f OnMissHandler, opchan chan reqGet) {
	// Push result of call to wrapped function down this channel
	//
	// waiting maps a key to the reply channel of the MOST RECENT subscriber
	// for that key. Each new subscriber replaces the entry and takes on the
	// duty of forwarding the result to the subscriber it displaced, so the
	// channels form a chain that relays the reply to every waiter.
	waiting := map[string]chan replyGet{}
	// fullReply pairs a result with its key so the loop knows which chain
	// in `waiting` to deliver it to.
	type fullReply struct {
		replyGet
		id string
	}
	donechan := make(chan fullReply)
	// donechan is set to nil (disabling its case and ending the loop) only
	// after opchan is closed and `waiting` has drained.
	for donechan != nil {
		select {
		// A new subscriber appears!
		case r, ok := <-opchan:
			if !ok {
				// Stop bothering with incoming operations (opchan closed);
				// keep looping to drain donechan for in-flight calls.
				opchan = nil
				break
			}
			oldreplychan, inprogress := waiting[r.id]
			newreplychan := make(chan replyGet)
			waiting[r.id] = newreplychan
			if !inprogress {
				// Launch a seed: the one goroutine that actually calls f
				// for this key and reports the result on donechan.
				// Explicit argument to deal with Go closure semantics
				go func(r reqGet) {
					var reply fullReply
					reply.id = r.id
					reply.val, reply.err = f(r.id)
					donechan <- reply
				}(r)
			}
			// Launch a consumer: waits on this subscriber's channel, answers
			// the subscriber, then relays the reply down the chain.
			go func(r reqGet) {
				reply := <-newreplychan
				// Pass the result to the waiting call to wrapper
				r.reply <- reply
				close(r.reply)
				if oldreplychan != nil {
					// Forward the reply to the next listener
					oldreplychan <- reply
					close(oldreplychan)
				}
			}(r)
			break
		case full := <-donechan:
			// A seed finished: hand the result to the head of the consumer
			// chain for this key; the consumers forward it to everyone else.
			waiting[full.id] <- full.replyGet
			delete(waiting, full.id)
			// No new requests possible and nothing in flight: shut down.
			if opchan == nil && len(waiting) == 0 {
				close(donechan)
				donechan = nil
			}
			break
		}
	}
	return
}
// Concurrent duplicate calls (same arg) are unified into one call. The result
// is returned to all callers by the wrapper. Intended for wrapping OnMiss
// handlers.
//
// The second return value is the quit channel. Send any value down that
// channel to stop the wrapper. Running operations will complete but it is an
// error to invoke this function after that. Not panic, just an error.
func NoConcurrentDupes(f OnMissHandler) (OnMissHandler, chan<- bool) {
	errClosed := errors.New("NoConcurrentDupes wrapper has been closed")
	opchan := make(chan reqGet)
	go nocondupesMainloop(f, opchan)
	quit := make(chan bool, 1)
	// mu guards opchan. The wrapper is designed for concurrent callers, and
	// without synchronization the nil-check, the `opchan = nil` /
	// close(opchan) in the quit branch, and the send below race with each
	// other: a data race on the variable, and a possible panic from sending
	// on a channel another goroutine just closed.
	var mu sync.Mutex
	wrap := func(key string) (Cacheable, error) {
		replychan := make(chan replyGet)
		mu.Lock()
		if opchan == nil {
			mu.Unlock()
			return nil, errClosed
		}
		select {
		case <-quit:
			// First caller to observe the quit signal shuts the mainloop
			// down; all later callers take the nil-check branch above.
			close(opchan)
			opchan = nil
			mu.Unlock()
			return nil, errClosed
		default:
		}
		// Send while still holding the lock so nobody can close opchan
		// mid-send. The mainloop accepts promptly, so the critical section
		// stays short; the (potentially slow) wait for the reply happens
		// after unlocking.
		opchan <- reqGet{key, replychan}
		mu.Unlock()
		reply := <-replychan
		return reply.val, reply.err
	}
	return wrap, quit
}
// Wrapper function that limits the number of concurrent calls to f. Intended
// for wrapping OnMiss handlers.
//
// A buffered channel is used as a counting semaphore: every invocation must
// acquire a slot before calling f and releases it when f returns.
//
// NOTE(review): maxconcurrent == 0 yields an unbuffered channel, so every
// call would block forever — confirm callers never pass 0.
func ThrottleConcurrency(f OnMissHandler, maxconcurrent uint) OnMissHandler {
	sem := make(chan struct{}, maxconcurrent)
	return func(key string) (Cacheable, error) {
		sem <- struct{}{}
		defer func() { <-sem }()
		return f(key)
	}
}