/
background.go
104 lines (81 loc) · 2.09 KB
/
background.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
// Package background implements functions to execute tasks in a separate
// goroutine.
package background
import (
"context"
"runtime"
"sync"
"golang.org/x/exp/event"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
)
// Package-level metrics shared by all Worker instances.
//
// NOTE(review): the Description string says "async message" (singular);
// probably meant "messages" — left as-is since it is a runtime string.
var (
	// processedTotal counts every value submitted via Exec/BatchExec.
	processedTotal = event.NewCounter("processed_total", &event.MetricOptions{
		Description: "the number of processed async message",
	})
	// workersCount tracks background goroutines currently running a handler.
	workersCount = event.NewFloatGauge("workers_count", &event.MetricOptions{
		Description: "the number of background workers running",
	})
)
// Worker executes a handler for values of type T on a bounded number of
// background goroutines. Use New to construct one; the zero value is not
// usable (sem and handler would be nil).
type Worker[T any] struct {
	// sem bounds the number of concurrently running handler goroutines.
	sem *semaphore.Weighted
	// wg tracks in-flight goroutines so stop can wait for them.
	wg sync.WaitGroup
	// handler is invoked once per submitted value.
	handler func(ctx context.Context, v T)
}
// Option configures a Worker created by New.
type Option[T any] struct {
	// Handler is called for each value passed to Exec/BatchExec.
	Handler func(ctx context.Context, v T)
	// MaxWorkers caps concurrent handler goroutines; values <= 0
	// default to runtime.GOMAXPROCS(0).
	MaxWorkers int
}
// New returns a Worker that runs opt.Handler on at most opt.MaxWorkers
// goroutines at a time, along with a shutdown func that blocks until
// every in-flight handler has finished. A MaxWorkers of zero or below
// defaults to runtime.GOMAXPROCS(0).
func New[T any](opt Option[T]) (*Worker[T], func()) {
	max := opt.MaxWorkers
	if max <= 0 {
		max = runtime.GOMAXPROCS(0)
	}
	worker := &Worker[T]{
		sem:     semaphore.NewWeighted(int64(max)),
		handler: opt.Handler,
	}
	return worker, worker.stop
}
// Exec records the value in the processed counter and dispatches it to
// the handler, running it on a background goroutine when a worker slot
// is available (see exec). There is no channel involved; dispatch is
// semaphore-bounded.
func (w *Worker[T]) Exec(ctx context.Context, v T) {
	processedTotal.Record(ctx, 1)
	w.exec(ctx, v)
}
// BatchExec records len(vs) in the processed counter and dispatches each
// value to the handler individually, in order of submission. Each value
// is scheduled exactly as a single Exec would schedule it.
func (w *Worker[T]) BatchExec(ctx context.Context, vs ...T) {
	processedTotal.Record(ctx, int64(len(vs)))
	for _, v := range vs {
		w.exec(ctx, v)
	}
}
// stop blocks until all in-flight handler goroutines have finished.
// It does not prevent further Exec calls; callers must stop submitting
// work before invoking it (returned as the shutdown func from New).
func (w *Worker[T]) stop() {
	w.wg.Wait()
}
// exec runs the handler for v, preferably on a bounded background
// goroutine; if a worker slot cannot be acquired it falls back to
// running the handler synchronously on the caller's goroutine.
func (w *Worker[T]) exec(ctx context.Context, v T) {
	// Detach from the caller's cancellation/deadline so the handler is
	// not interrupted when the originating request completes; values
	// stored in ctx remain available.
	ctx = context.WithoutCancel(ctx)
	// Acquire with context.Background() so the wait itself is also
	// uncancelable; with an uncancelable context this Acquire only
	// returns once a slot is free, so the error branch is a safety net.
	if err := w.sem.Acquire(context.Background(), 1); err != nil {
		// Execute the handler immediately if we fail to acquire semaphore.
		w.handler(ctx, v)
		return
	}
	// Add before spawning so stop() cannot miss this goroutine.
	w.wg.Add(1)
	go func() {
		defer w.wg.Done()
		defer w.sem.Release(1)
		// NOTE(review): Record on a FloatGauge appears to be used here as
		// +1/-1 deltas; confirm the event package aggregates gauge records
		// as a sum rather than "last value", or this metric is misleading.
		workersCount.Record(ctx, 1)
		defer workersCount.Record(ctx, -1)
		w.handler(ctx, v)
	}()
}
// BatchExecN runs h for every element of vs with at most n invocations
// executing concurrently. It returns the first error produced by any
// invocation; once one fails, the remaining invocations observe a
// canceled context.
func BatchExecN[T any](ctx context.Context, h func(ctx context.Context, v T) error, n int, vs ...T) error {
	group, groupCtx := errgroup.WithContext(ctx)
	group.SetLimit(n)
	for i := range vs {
		v := vs[i] // per-iteration copy: file targets pre-Go 1.22 loop semantics
		group.Go(func() error {
			return h(groupCtx, v)
		})
	}
	return group.Wait()
}