@@ -64,7 +64,7 @@ func TestBlockbuilder_lookbackOnNoCommit(t *testing.T) {
	})

	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
-	producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	producedRecords := sendReq(ctx, t, client, util.FakeTenantID)

	// Wait for record to be consumed and committed.
	require.Eventually(t, func() bool {
@@ -246,7 +246,7 @@ func TestBlockbuilder_receivesOldRecords(t *testing.T) {
	})

	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
-	producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	producedRecords := sendReq(ctx, t, client, util.FakeTenantID)

	// Wait for record to be consumed and committed.
	require.Eventually(t, func() bool {
@@ -385,7 +385,7 @@ func TestBlockbuilder_retries_on_retriable_commit_error(t *testing.T) {
	logger := test.NewTestingLogger(t)

	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
-	producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	producedRecords := sendReq(ctx, t, client, util.FakeTenantID)
	lastRecordOffset := producedRecords[len(producedRecords)-1].Offset

	b, err := New(cfg, logger, newPartitionRingReader(), &mockOverrides{}, store)
@@ -443,7 +443,7 @@ func TestBlockbuilder_retries_on_commit_error(t *testing.T) {
	logger := test.NewTestingLogger(t)

	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
-	producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	producedRecords := sendReq(ctx, t, client, util.FakeTenantID)
	lastRecordOffset := producedRecords[len(producedRecords)-1].Offset

	b, err := New(cfg, logger, newPartitionRingReader(), &mockOverrides{}, store)
@@ -490,7 +490,7 @@ func TestBlockbuilder_noDoubleConsumption(t *testing.T) {
	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)

	// Send a single record
-	producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	producedRecords := sendReq(ctx, t, client, util.FakeTenantID)
	lastRecordOffset := producedRecords[len(producedRecords)-1].Offset

	// Create the block builder
@@ -510,7 +510,7 @@ func TestBlockbuilder_noDoubleConsumption(t *testing.T) {
	requireLastCommitEquals(t, ctx, client, lastRecordOffset+1)

	// Send another record
-	newRecords := sendReq(t, ctx, client, util.FakeTenantID)
+	newRecords := sendReq(ctx, t, client, util.FakeTenantID)
	newRecordOffset := newRecords[len(newRecords)-1].Offset

	// Wait for the new record to be consumed and committed
@@ -539,7 +539,7 @@ func TestBlockBuilder_honor_maxBytesPerCycle(t *testing.T) {
		{
			name:             "Limited to 1 bytes per cycle",
			maxBytesPerCycle: 1,
-			expectedCommits:  1,
+			expectedCommits:  2,
			expectedWrites:   2,
		},
		{
@@ -587,8 +587,8 @@ func TestBlockBuilder_honor_maxBytesPerCycle(t *testing.T) {

			client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
			// We send two records with a size less than 30KB
-			sendReq(t, ctx, client, util.FakeTenantID)
-			producedRecords := sendReq(t, ctx, client, util.FakeTenantID)
+			sendReq(ctx, t, client, util.FakeTenantID)
+			producedRecords := sendReq(ctx, t, client, util.FakeTenantID)

			require.Eventually(t, func() bool {
				return kafkaCommits.Load() == tc.expectedCommits
@@ -731,8 +731,8 @@ func TestBlockbuilder_marksOldBlocksCompacted(t *testing.T) {
		badTenantID     = "2"
		producedRecords []*kgo.Record
	)
-	producedRecords = append(producedRecords, sendReq(t, ctx, client, goodTenantID)...)
-	producedRecords = append(producedRecords, sendReq(t, ctx, client, badTenantID)...)
+	producedRecords = append(producedRecords, sendReq(ctx, t, client, goodTenantID)...)
+	producedRecords = append(producedRecords, sendReq(ctx, t, client, badTenantID)...)
	lastRecordOffset := producedRecords[len(producedRecords)-1].Offset

	// Simulate failures on the first cycle
@@ -824,9 +824,13 @@ func TestBlockbuilder_gracefulShutdown(t *testing.T) {
	store := newStore(ctx, t)
	cfg := blockbuilderConfig(t, address, []int32{0}) // Fix: Properly specify partition

+	// Send initial traces to ensure the partition has records
+	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
+	sendReq(ctx, t, client, util.FakeTenantID)
+
	// Start sending traces in the background
	go func() {
-		sendTracesFor(t, ctx, newKafkaClient(t, cfg.IngestStorageConfig.Kafka), 60*time.Second, time.Second)
+		sendTracesFor(t, ctx, client, 60*time.Second, time.Second)
	}()

	b, err := New(cfg, test.NewTestingLogger(t), newPartitionRingReader(), &mockOverrides{}, store)
@@ -1004,15 +1008,30 @@ func countFlushedTraces(store storage.Store) int {
	return count
}

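+// reqOpts describes how a test record batch is produced: the target partition, the record timestamp, and the tenant.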
+type reqOpts struct {
+	partition int32
+	time      time.Time
+	tenantID  string
+}
+
+func (r *reqOpts) applyDefaults() {
+	if r.tenantID == "" {
+		r.tenantID = util.FakeTenantID
+	}
+	if r.time.IsZero() {
+		r.time = time.Now()
+	}
+}
+
// nolint: revive
-func sendReq(t testing.TB, ctx context.Context, client *kgo.Client, tenantID string) []*kgo.Record {
+func sendReqWithOpts(ctx context.Context, t testing.TB, client *kgo.Client, opts reqOpts) []*kgo.Record {
	traceID := generateTraceID(t)
+	opts.applyDefaults()

-	now := time.Now()
-	startTime := uint64(now.UnixNano())
-	endTime := uint64(now.Add(time.Second).UnixNano())
+	startTime := uint64(opts.time.UnixNano())
+	endTime := uint64(opts.time.Add(time.Second).UnixNano())

	req := test.MakePushBytesRequest(t, 10, traceID, startTime, endTime)
-	records, err := ingest.Encode(0, tenantID, req, 1_000_000)
+	records, err := ingest.Encode(opts.partition, opts.tenantID, req, 1_000_000)
	require.NoError(t, err)

	res := client.ProduceSync(ctx, records...)
@@ -1021,6 +1040,10 @@ func sendReq(t testing.TB, ctx context.Context, client *kgo.Client, tenantID str
	return records
}

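+// sendReq produces records for tenantID to partition 0, timestamped at the current time.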
+func sendReq(ctx context.Context, t testing.TB, client *kgo.Client, tenantID string) []*kgo.Record {
+	return sendReqWithOpts(ctx, t, client, reqOpts{partition: 0, time: time.Now(), tenantID: tenantID})
+}
+
// nolint: revive,unparam
func sendTracesFor(t *testing.T, ctx context.Context, client *kgo.Client, dur, interval time.Duration) []*kgo.Record {
	ticker := time.NewTicker(interval)
@@ -1038,7 +1061,7 @@ func sendTracesFor(t *testing.T, ctx context.Context, client *kgo.Client, dur, i
		case <-timer.C: // Exit the function when the timer is done
			return producedRecords
		case <-ticker.C:
-			records := sendReq(t, ctx, client, util.FakeTenantID)
+			records := sendReq(ctx, t, client, util.FakeTenantID)
			producedRecords = append(producedRecords, records...)
		}
	}
@@ -1112,7 +1135,7 @@ func BenchmarkBlockBuilder(b *testing.B) {
		b.StopTimer()
		size := 0
		for i := 0; i < 1000; i++ {
-			for _, r := range sendReq(b, ctx, client, util.FakeTenantID) {
+			for _, r := range sendReq(ctx, b, client, util.FakeTenantID) {
				size += len(r.Value)
			}
		}
@@ -1124,3 +1147,100 @@ func BenchmarkBlockBuilder(b *testing.B) {
		b.SetBytes(int64(size))
	}
}
+
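+// slowStore wraps a storage.Store and gates WriteBlock on a channel handshake so the test can step block flushes via lock and unlock.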
+type slowStore struct {
+	storage.Store
+	wait chan struct{}
+}
+
+func (s *slowStore) WriteBlock(ctx context.Context, block tempodb.WriteableBlock) error {
+	s.wait <- struct{}{} // send a signal to a goroutine
+	<-s.wait             // wait for the signal from the goroutine
+	return s.Store.WriteBlock(ctx, block)
+}
+
+// lock waits for the signal from WriteBlock, blocking the write operation
+func (s *slowStore) lock() {
+	<-s.wait
+}
+
+// unlock sends a signal to WriteBlock, unblocking the write operation
+func (s *slowStore) unlock() {
+	s.wait <- struct{}{}
+}
+
+// TestBlockbuilder_twoPartitions_secondEmpty verifies correct handling of two Kafka
+// partitions where the second partition is initially empty and receives data later.
+// It uses a channel-gated store to step consumption, injects records between consume
+// cycles (sleeping longer than ConsumeCycleDuration), and asserts that:
+// - both partitions are assigned,
+// - three blocks are flushed (p0 initial, p0 later, p1 later), and
+// - committed offsets equal the number of sent records.
+// The test is highly coupled to the blockbuilder implementation. If it gets stuck on the
+// channel, it is advised to debug the consume cycle step by step.
+func TestBlockbuilder_twoPartitions_secondEmpty(t *testing.T) {
+	ctx, cancel := context.WithCancelCause(context.Background())
+	t.Cleanup(func() { cancel(errors.New("test done")) })
+	reqTime := time.Now().Add(-1 * time.Minute) // ensure it won't be filtered out by the cycle duration check
+
+	// Create a Kafka cluster with 2 partitions
+	_, address := testkafka.CreateCluster(t, 2, testTopic)
+
+	// Set up the block builder
+	ch := make(chan struct{})
+	store := &slowStore{Store: newStore(ctx, t), wait: ch}
+	cfg := blockbuilderConfig(t, address, []int32{0, 1})
+	cfg.ConsumeCycleDuration = time.Second
+	partitionRing := newPartitionRingReaderWithPartitions(map[int32]ring.PartitionDesc{
+		0: {Id: 0, State: ring.PartitionActive},
+		1: {Id: 1, State: ring.PartitionActive},
+	})
+
+	client := newKafkaClient(t, cfg.IngestStorageConfig.Kafka)
+	// First, produce to partition 0
+	sendReqWithOpts(ctx, t, client, reqOpts{partition: 0, time: reqTime, tenantID: util.FakeTenantID})
+
+	// And only then create the block builder
+	b, err := New(cfg, test.NewTestingLogger(t), partitionRing, &mockOverrides{}, store)
+	require.NoError(t, err)
+
+	// Verify the builder is listening to both partitions
+	parts := b.getAssignedPartitions()
+	require.ElementsMatch(t, []int32{0, 1}, parts)
+
+	require.NoError(t, services.StartAndAwaitRunning(ctx, b))
+	t.Cleanup(func() {
+		require.NoError(t, services.StopAndAwaitTerminated(ctx, b))
+	})
+
+	// After the initial consumption, add more records to the first partition
+	store.lock()
+	sendReqWithOpts(ctx, t, client, reqOpts{partition: 0, time: reqTime, tenantID: util.FakeTenantID})
+	store.unlock()
+
+	// After processing the first partition, add more records to the second partition
+	store.lock()
+	sendReqWithOpts(ctx, t, client, reqOpts{partition: 1, time: reqTime, tenantID: util.FakeTenantID})
+	store.unlock()
+
+	// Wait for the second partition to finish
+	store.lock()
+	store.unlock()
+
+	// Wait for the blocks to be flushed (one block per consumePartition call)
+	require.Eventually(t, func() bool {
+		return len(store.BlockMetas(util.FakeTenantID)) == 3 && countFlushedTraces(store) == 3
+	}, 20*time.Second, time.Second)
+
+	// Verify offsets
+	offsets, err := kadm.NewClient(client).FetchOffsetsForTopics(ctx, testConsumerGroup, testTopic)
+	require.NoError(t, err)
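+	// Expected commits match the number of records sent to each partition: two to partition 0, one to partition 1.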
+	for partition, expectedOffset := range map[int32]int64{
+		0: 2,
+		1: 1,
+	} {
+		offset, ok := offsets.Lookup(testTopic, partition)
+		require.True(t, ok, "partition %d should have a committed offset", partition)
+		require.Equal(t, expectedOffset, offset.At)
+	}
+}