-
Notifications
You must be signed in to change notification settings - Fork 0
/
pool.go
143 lines (119 loc) · 2.83 KB
/
pool.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
package routinepool
import (
"context"
"sync"
"sync/atomic"
)
// Pool is a goroutine pool: submitted functions are queued and executed by a
// bounded set of reusable worker goroutines.
type Pool interface {
	// Name returns the corresponding pool name.
	Name() string
	// SetCap sets the goroutine capacity of the pool, i.e. the maximum
	// number of workers allowed to run concurrently.
	SetCap(cap int32)
	// Go executes f with a background context.
	Go(f RoutineFunc)
	// CtxGo executes f and accepts the context that is passed through to f.
	CtxGo(ctx context.Context, f RoutineFunc)
	// SetPanicHandler sets the handler invoked after a panic in a submitted
	// function has been recovered.
	SetPanicHandler(f func(context.Context, error))
}

// RoutineFunc is the signature of a function that can be submitted to a Pool.
type RoutineFunc func(context.Context)
var taskPool sync.Pool
func init() {
taskPool.New = newTask
}
type task struct {
ctx context.Context
f func(context.Context)
next *task
}
func (t *task) zero() {
t.ctx = nil
t.f = nil
t.next = nil
}
func (t *task) Recycle() {
t.zero()
taskPool.Put(t)
}
func newTask() interface{} {
return &task{}
}
// taskList is a mutex-guarded singly linked list of tasks.
// NOTE(review): not referenced anywhere in this file — pool embeds its own
// head/tail/lock fields directly. Possibly dead code or used by another
// file in the package; confirm before removing.
type taskList struct {
	sync.Mutex
	taskHead *task
	taskTail *task
}
// pool is the default Pool implementation. Tasks are kept in an intrusive
// FIFO linked list guarded by taskLock and drained by worker goroutines.
type pool struct {
	// The name of the pool, used to tag metrics.
	name string
	// capacity of the pool: the maximum number of goroutines that are
	// actually working. Read/written atomically (see SetCap and CtxGo).
	cap int32
	// Configuration information (e.g. ScaleThreshold read in CtxGo).
	config *Config
	// Linked list of pending tasks, guarded by taskLock.
	taskHead  *task
	taskTail  *task
	taskLock  sync.Mutex
	taskCount int32 // length of the task list, maintained atomically
	// Record the number of running workers, maintained atomically.
	workerCount int32
	// This method will be called when a worker panics.
	// NOTE(review): set via SetPanicHandler without synchronization —
	// presumably read by workers; set it before submitting tasks.
	panicHandler func(context.Context, error)
}
// NewPool creates a new pool with the given name, cap and config.
// The returned pool starts with zero workers; workers are spawned lazily
// as tasks are submitted.
func NewPool(name string, cap int32, config *Config) Pool {
	return &pool{
		name:   name,
		cap:    cap,
		config: config,
	}
}
// Name returns the corresponding pool name.
func (p *pool) Name() string {
	return p.name
}

// SetCap sets the goroutine capacity of the pool. Stored atomically
// because CtxGo reads cap without holding taskLock.
func (p *pool) SetCap(cap int32) {
	atomic.StoreInt32(&p.cap, cap)
}

// Go executes f with a background context; see CtxGo for the queueing
// and worker-scaling behavior.
func (p *pool) Go(f RoutineFunc) {
	p.CtxGo(context.Background(), f)
}
// CtxGo enqueues f with ctx onto the pool's FIFO task list and, when the
// backlog or worker count warrants it, starts an additional worker.
func (p *pool) CtxGo(ctx context.Context, f RoutineFunc) {
	// Reuse a pooled task object to avoid an allocation per submission.
	t := taskPool.Get().(*task)
	t.ctx = ctx
	t.f = f

	// Append to the intrusive FIFO list under the lock.
	p.taskLock.Lock()
	if p.taskHead == nil {
		p.taskHead = t
		p.taskTail = t
	} else {
		p.taskTail.next = t
		p.taskTail = t
	}
	p.taskLock.Unlock()

	cnt := atomic.AddInt32(&p.taskCount, 1)
	_metricQueueSize.Set(float64(cnt), p.name)

	// Start a new worker when either:
	//  1. the backlog reached the scale threshold AND the current number of
	//     workers is below the cap, or
	//  2. there are currently no workers at all (otherwise nothing would
	//     ever drain the queue).
	// Use the post-increment snapshot cnt instead of re-loading taskCount:
	// the second atomic load was redundant and could observe a value already
	// decremented by a concurrent worker, skipping a needed scale-up.
	if (cnt >= p.config.ScaleThreshold && p.WorkerCount() < atomic.LoadInt32(&p.cap)) || p.WorkerCount() == 0 {
		p.incWorkerCount()
		w := workerPool.Get().(*worker)
		w.pool = p
		w.run()
	}
}
// SetPanicHandler sets the function that will be called after a panic in a
// submitted task has been recovered.
// NOTE(review): the assignment is not synchronized — presumably workers read
// panicHandler concurrently; call this before submitting tasks to avoid a
// data race. Confirm against the worker implementation.
func (p *pool) SetPanicHandler(f func(context.Context, error)) {
	p.panicHandler = f
}
// WorkerCount reports the number of workers currently running.
func (p *pool) WorkerCount() int32 {
	return atomic.LoadInt32(&p.workerCount)
}

// incWorkerCount atomically records one more running worker.
func (p *pool) incWorkerCount() {
	atomic.AddInt32(&p.workerCount, 1)
}

// decWorkerCount atomically records one fewer running worker.
func (p *pool) decWorkerCount() {
	atomic.AddInt32(&p.workerCount, -1)
}