forked from ChristophGraham/go-ext
-
Notifications
You must be signed in to change notification settings - Fork 0
/
parallel.go
89 lines (76 loc) · 1.58 KB
/
parallel.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
package msync
import (
"sync"
"sync/atomic"
)
// Parallel runs fn(i) once for every i in [0, count), spreading the
// calls across at most `workers` goroutines. It blocks until every
// call has returned.
func Parallel(count, workers int, fn func(i int)) {
	// Delegate to ParallelChunks with single-element chunks; each chunk
	// [lo, lo+1) maps to exactly one fn(lo) invocation.
	ParallelChunks(count, 1, workers, func(lo, _ int) {
		fn(lo)
	})
}
// ParallelRanges splits the range [0, count) into contiguous segments,
// one per worker goroutine, and invokes fn(start, end) on each segment
// concurrently. It blocks until every segment has been processed.
//
// If count is 0 nothing is done. If count is smaller than workers the
// whole range is handled by a single synchronous call to fn. A workers
// value below 1 is treated as 1 (serial execution).
func ParallelRanges(count, workers int, fn func(start, end int)) {
	if count == 0 {
		return
	}
	// Guard against workers <= 0: the segment computation below would
	// otherwise divide by zero (workers == 0) or silently skip all work
	// because the dispatch loop never runs (workers < 0).
	if workers < 1 {
		workers = 1
	}
	if count < workers {
		// Too few items to be worth fanning out; run serially.
		fn(0, count)
		return
	}
	segment := count / workers
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		start := i * segment
		end := (i + 1) * segment
		if i == workers-1 {
			// The last worker absorbs the count % workers remainder.
			end = count
		}
		if end == start {
			continue
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			fn(start, end)
		}()
	}
	wg.Wait()
}
// ParallelChunks splits the range [0, count) into chunks of at most
// chunkSize elements and feeds each chunk to fn(start, end), with a
// bounded concurrency of `workers` goroutines. Chunks are claimed from
// a shared atomic counter, so faster workers naturally pick up more
// chunks. It blocks until the whole range has been processed.
//
// If count is 0 nothing is done. If count is smaller than chunkSize the
// whole range is handled by a single synchronous call to fn. A
// chunkSize below 1 panics; a workers value below 1 is treated as 1.
func ParallelChunks(count, chunkSize, workers int, fn func(start, end int)) {
	if count == 0 {
		return
	}
	if chunkSize < 1 {
		// A zero chunk would spin forever without progress and a
		// negative one would walk the counter backwards; both are
		// programmer errors, so fail loudly.
		panic("msync: ParallelChunks requires a chunkSize of at least 1")
	}
	if count < chunkSize {
		// The whole range fits in one chunk; no goroutines needed.
		fn(0, count)
		return
	}
	// Guard against workers <= 0: wg.Add(workers) would panic on a
	// negative value and silently skip all work on zero.
	if workers < 1 {
		workers = 1
	}
	var (
		wg      sync.WaitGroup
		offset  int64
		chunk64 = int64(chunkSize)
	)
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for {
				// Atomically claim the next chunk: the add reserves
				// [upper-chunkSize, upper) for this worker alone.
				upper := int(atomic.AddInt64(&offset, chunk64))
				lower := upper - chunkSize
				if lower >= count {
					return
				}
				if upper > count {
					// Clip the final, possibly partial, chunk.
					upper = count
				}
				fn(lower, upper)
			}
		}()
	}
	wg.Wait()
}