/
stream_benchmark_definitions.go
106 lines (89 loc) · 2.59 KB
/
stream_benchmark_definitions.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
package integration
import (
"fmt"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/benthosdev/benthos/v4/internal/message"
)
// StreamBenchSend benchmarks the speed at which messages are sent over the
// templated output and then subsequently received from the input with a given
// batch size and parallelism.
func StreamBenchSend(batchSize, parallelism int) StreamBenchDefinition {
	return namedBench(
		fmt.Sprintf("send message batches %v with parallelism %v", batchSize, parallelism),
		func(b *testing.B, env *streamTestEnvironment) {
			// Guard both knobs: batchSize of zero would divide by zero below,
			// and zero parallelism would deadlock the batchChan sends.
			require.Greater(b, batchSize, 0)
			require.Greater(b, parallelism, 0)

			tranChan := make(chan message.Transaction)
			input, output := initConnectors(b, tranChan, env)
			b.Cleanup(func() {
				closeConnectors(b, env, input, output)
			})

			sends := b.N / batchSize

			// Pre-compute the full set of expected payloads so the receiver
			// goroutine can tick each one off as it arrives. The index is
			// j*batchSize+i so every payload across all batches is unique;
			// the previous j*sends+i formula produced duplicates whenever
			// batchSize != sends, which collapsed in this map and caused the
			// receiver loop below to exit before all messages were consumed.
			set := map[string][]string{}
			for j := 0; j < sends; j++ {
				for i := 0; i < batchSize; i++ {
					payload := fmt.Sprintf("hello world %v", j*batchSize+i)
					set[payload] = nil
				}
			}

			b.ResetTimer()

			batchChan := make(chan []string)

			// Spin up `parallelism` sender workers that drain batchChan until
			// it is closed by the producer loop at the bottom.
			var wg sync.WaitGroup
			for k := 0; k < parallelism; k++ {
				wg.Add(1)
				go func() {
					defer wg.Done()
					for {
						batch, open := <-batchChan
						if !open {
							return
						}
						assert.NoError(b, sendBatch(env.ctx, b, tranChan, batch))
					}
				}()
			}

			// Single receiver: keeps consuming from the input until every
			// expected payload in `set` has been observed.
			wg.Add(1)
			go func() {
				defer wg.Done()
				for len(set) > 0 {
					messagesInSet(b, true, true, receiveBatch(env.ctx, b, input.TransactionChan(), nil), set)
				}
			}()

			// Feed the workers; payload generation must mirror the set
			// construction above exactly.
			for j := 0; j < sends; j++ {
				payloads := []string{}
				for i := 0; i < batchSize; i++ {
					payload := fmt.Sprintf("hello world %v", j*batchSize+i)
					payloads = append(payloads, payload)
				}
				batchChan <- payloads
			}
			close(batchChan)
			wg.Wait()
		},
	)
}
// StreamBenchWrite benchmarks the speed at which messages can be written to the
// output, with no attempt made to consume the written data.
func StreamBenchWrite(batchSize int) StreamBenchDefinition {
	return namedBench(
		fmt.Sprintf("write message batches %v without reading", batchSize),
		func(b *testing.B, env *streamTestEnvironment) {
			// batchSize of zero would divide by zero when computing sends.
			require.Greater(b, batchSize, 0)

			tranChan := make(chan message.Transaction)
			output := initOutput(b, tranChan, env)
			b.Cleanup(func() {
				closeConnectors(b, env, nil, output)
			})

			sends := b.N / batchSize

			b.ResetTimer()

			// NOTE(review): the batch slice is reused across iterations,
			// which assumes sendBatch has finished with its contents before
			// returning — confirm against sendBatch's implementation.
			batch := make([]string, batchSize)
			for j := 0; j < sends; j++ {
				for i := 0; i < batchSize; i++ {
					// j*batchSize+i gives every message a globally unique id;
					// the previous j*sends+i formula produced duplicate ids
					// whenever batchSize != sends.
					batch[i] = fmt.Sprintf(`{"content":"hello world","id":%v}`, j*batchSize+i)
				}
				assert.NoError(b, sendBatch(env.ctx, b, tranChan, batch))
			}
		},
	)
}