@@ -17,7 +17,6 @@ import (
 	"time"
 
 	"github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
-	"github.com/hyperledger/fabric/common/tools/configtxgen/provisional"
 	"github.com/hyperledger/fabric/orderer/common/localconfig"
 	perf "github.com/hyperledger/fabric/orderer/common/performance"
 	cb "github.com/hyperledger/fabric/protos/common"
@@ -26,10 +25,10 @@ import (
 
 // Usage: BENCHMARK=true go test -run=TestOrdererBenchmark[Solo|Kafka][Broadcast|Deliver]
 //
-// Benchmark test makes [ch] channels, creates [bc] clients per client per orderer. There are
-// [ord] orderer instances in total. A client ONLY interacts with ONE channel and ONE
-// orderer, so the number of client in total is [ch * bc * ord]. Note that all clients are
-// concurrent.
+// Benchmark test makes [ch] channels, and creates [bc] clients per channel per
+// orderer. There are [ord] orderer instances in total. A client ONLY interacts with ONE
+// channel and ONE orderer, so the number of clients in total is [ch * bc * ord]. Note that
+// all clients execute concurrently.
 //
 // The test sends [tx] transactions of size [kb] in total. These tx are evenly distributed
 // among all clients, which gives us [tx / (ch * bc * ord)] tx per client.
@@ -56,7 +55,7 @@ import (
 // ordered. This is important for evaluating elapsed time of async broadcast operations.
 //
 // Again, each deliver client only interacts with one channel and one orderer, which
-// results in [a * f * e] deliver clients in total.
+// results in [ch * dc * ord] deliver clients in total.
 //
 // ch -> channelCounts
 // bc -> broadcastClientPerChannel
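
To make the arithmetic in the comment block concrete, here is a minimal, purely illustrative sketch; the factor values below are hypothetical and are not taken from the test matrices in this file.

package main

import "fmt"

func main() {
	// Hypothetical factor values: 10 channels, 5 broadcast and 5 deliver
	// clients per channel per orderer, 2 orderers, 10000 transactions.
	ch, bc, dc, ord, tx := 10, 5, 5, 2, 10000

	broadcastClients := ch * bc * ord // 10 * 5 * 2 = 100 concurrent broadcast clients
	deliverClients := ch * dc * ord   // 10 * 5 * 2 = 100 concurrent deliver clients
	txPerClient := tx / broadcastClients

	fmt.Println(broadcastClients, deliverClients, txPerClient) // 100 100 100
}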
@@ -70,8 +69,10 @@ import (
 // as deliver is effectively retrieving pre-generated blocks, so it shouldn't be choked
 // by slower broadcast.
 //
-// Note: a Kafka broker listening on localhost:9092 is required to run Kafka based benchmark
-// TODO(jay_guo) use ephemeral kafka container for test
+// Note: At least three Kafka brokers listening on localhost:[9092-9094] are required to
+// run the Kafka-based benchmark. This is set in the `envvars` map and can be adjusted
+// if need be.
+// TODO Spin up ephemeral Kafka containers for test
 
 const (
 	MaxMessageCount = 10
@@ -87,10 +88,11 @@ var envvars = map[string]string{
 	"ORDERER_GENERAL_GENESISPROFILE": localconfig.SampleDevModeSoloProfile,
 	"ORDERER_GENERAL_LEDGERTYPE": "file",
 	"ORDERER_GENERAL_LOGLEVEL": "error",
+	"ORDERER_KAFKA_VERBOSE": "false",
 	localconfig.Prefix + "_ORDERER_BATCHSIZE_MAXMESSAGECOUNT": strconv.Itoa(MaxMessageCount),
 	localconfig.Prefix + "_ORDERER_BATCHSIZE_ABSOLUTEMAXBYTES": strconv.Itoa(AbsoluteMaxBytes) + " KB",
 	localconfig.Prefix + "_ORDERER_BATCHSIZE_PREFERREDMAXBYTES": strconv.Itoa(PreferredMaxBytes) + " KB",
-	localconfig.Prefix + "_ORDERER_KAFKA_BROKERS": "[localhost:9092]",
+	localconfig.Prefix + "_ORDERER_KAFKA_BROKERS": "[localhost:9092, localhost:9093, localhost:9094]",
 }
 
 type factors struct {
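
Each benchmark test copies this `envvars` map into the process environment with os.Setenv before it runs, so batch-size and Kafka-broker settings can be tuned in one place. A minimal sketch of such a tweak, assuming it lives in this test package; the value is illustrative and not part of this change:

func init() {
	// Hypothetical override: benchmark larger batches by raising the batch-size
	// cap before the tests copy envvars into the environment.
	envvars[localconfig.Prefix+"_ORDERER_BATCHSIZE_MAXMESSAGECOUNT"] = strconv.Itoa(500)
}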
@@ -119,16 +121,13 @@ func (f factors) String() string {
 // As benchmark tests are skipped by default, we put this test here to catch
 // potential code changes that might break benchmark tests. If this test fails,
 // it is likely that benchmark tests need to be updated.
-func TestOrdererBenchmark(t *testing.T) {
-	os.Setenv(localconfig.Prefix+"_ORDERER_ORDERERTYPE", provisional.ConsensusTypeSolo)
-	defer os.Unsetenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
-
+func TestOrdererBenchmarkSolo(t *testing.T) {
 	for key, value := range envvars {
 		os.Setenv(key, value)
 		defer os.Unsetenv(key)
 	}
 
-	t.Run("Benchmark Sample Test", func(t *testing.T) {
+	t.Run("Benchmark Sample Test (Solo)", func(t *testing.T) {
 		benchmarkOrderer(t, 1, 5, PreferredMaxBytes, 1, 0, 1, true)
 	})
 }
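
For orientation, the sample call above exercises the smallest possible configuration. The sketch below restates it with the argument meanings spelled out; the parameter names are an assumption based on the factor names in the comment block (benchmarkOrderer's actual signature is not shown in this diff), and runSampleSoloBenchmark is a hypothetical helper, not part of the change:

func runSampleSoloBenchmark(t *testing.T) {
	benchmarkOrderer(t,
		1,                 // assumed numOfChannels: a single channel
		5,                 // assumed totalTx: five transactions overall
		PreferredMaxBytes, // assumed msgSize: each message at the preferred max, in KB
		1,                 // assumed broadcastClientPerChannel: one broadcast client
		0,                 // assumed deliverClientPerChannel: no deliver clients
		1,                 // assumed numOfOrderer: one orderer instance
		true,              // assumed multiplex flag; exact semantics defined by benchmarkOrderer
	)
}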
@@ -139,9 +138,6 @@ func TestOrdererBenchmarkSoloBroadcast(t *testing.T) {
 		t.Skip("Skipping benchmark test")
 	}
 
-	os.Setenv(localconfig.Prefix+"_ORDERER_ORDERERTYPE", provisional.ConsensusTypeSolo)
-	defer os.Unsetenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
-
 	for key, value := range envvars {
 		os.Setenv(key, value)
 		defer os.Unsetenv(key)
@@ -187,9 +183,6 @@ func TestOrdererBenchmarkSoloDeliver(t *testing.T) {
 		t.Skip("Skipping benchmark test")
 	}
 
-	os.Setenv(localconfig.Prefix+"_ORDERER_ORDERERTYPE", provisional.ConsensusTypeSolo)
-	defer os.Unsetenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
-
 	for key, value := range envvars {
 		os.Setenv(key, value)
 		defer os.Unsetenv(key)
@@ -235,14 +228,14 @@ func TestOrdererBenchmarkKafkaBroadcast(t *testing.T) {
 		t.Skip("Skipping benchmark test")
 	}
 
-	os.Setenv(localconfig.Prefix+"_ORDERER_ORDERERTYPE", provisional.ConsensusTypeKafka)
-	defer os.Unsetenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
-
 	for key, value := range envvars {
 		os.Setenv(key, value)
 		defer os.Unsetenv(key)
 	}
 
+	os.Setenv("ORDERER_GENERAL_GENESISPROFILE", localconfig.SampleDevModeKafkaProfile)
+	defer os.Unsetenv("ORDERER_GENERAL_GENESISPROFILE")
+
 	var (
 		channelCounts = []int{1, 10}
 		totalTx = []int{10000}
@@ -283,14 +276,14 @@ func TestOrdererBenchmarkKafkaDeliver(t *testing.T) {
 		t.Skip("Skipping benchmark test")
 	}
 
-	os.Setenv(localconfig.Prefix+"_ORDERER_ORDERERTYPE", provisional.ConsensusTypeKafka)
-	defer os.Unsetenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
-
 	for key, value := range envvars {
 		os.Setenv(key, value)
 		defer os.Unsetenv(key)
 	}
 
+	os.Setenv("ORDERER_GENERAL_GENESISPROFILE", localconfig.SampleDevModeKafkaProfile)
+	defer os.Unsetenv("ORDERER_GENERAL_GENESISPROFILE")
+
 	var (
 		channelCounts = []int{1, 10}
 		totalTx = []int{10000}
@@ -512,16 +505,16 @@ func benchmarkOrderer(
 	// Experiment shows that atomic counter is not bottleneck.
 	assert.Equal(t, uint64(totalTx), txCount, "Expected to send %d msg, but actually sent %d", uint64(totalTx), txCount)
 
-	ordererType := os.Getenv(localconfig.Prefix + "_ORDERER_ORDERERTYPE")
+	ordererProfile := os.Getenv("ORDERER_GENERAL_GENESISPROFILE")
 
 	fmt.Printf(
-		"Message: %6d Message Size: %3dKB Channels: %3d Orderer(%s): %2d | "+
+		"Messages: %6d Message Size: %3dKB Channels: %3d Orderer (%s): %2d | "+
 			"Broadcast Clients: %3d Write tps: %5.1f tx/s Elapsed Time: %0.2fs | "+
 			"Deliver clients: %3d Read tps: %8.1f blk/s Elapsed Time: %0.2fs\n",
 		totalTx,
 		msgSize,
 		numOfChannels,
-		ordererType,
+		ordererProfile,
 		numOfOrderer,
 		broadcastClientPerChannel*numOfChannels*numOfOrderer,
 		float64(totalTx)/btime.Seconds(),
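
As a quick sanity check on the figures this prints: the write throughput is simply totalTx divided by the broadcast elapsed time btime, so a run that pushes 10000 transactions through broadcast in 20 seconds would report a write rate of 10000 / 20 = 500.0 tx/s (illustrative numbers, not measured results).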