fix: Avoid out of bounds error. #125
Changes from all commits
@@ -3,4 +3,5 @@ external/
 node_modules
 coverage
 .eslintcache
 .env
+.vscode
@@ -825,3 +825,106 @@ test('parseResponse parses record data', () => {
  // Verify value is a Buffer
  ok(Buffer.isBuffer(record.value))
})
Review thread on the test below:
- We should test when the truncation happens in the nth recordsBatch.
- It already does. It fails in the 2nd batch.
- I don't think this tests the error. When the truncation happens in …
- See above: if the record is truncated, we discard it, and in the next run we get it from the beginning, including the already transmitted truncated part.

test('parseResponse handles truncated records', () => {
  // Create a response with records data
  // First create a record batch
  const timestamp = BigInt(Date.now())
  const recordsBatch = Writer.create()
    // Record batch structure
    .appendInt64(0n) // firstOffset
    .appendInt32(60) // length - this would be dynamically computed in real usage
    .appendInt32(0) // partitionLeaderEpoch
    .appendInt8(2) // magic (record format version)
    .appendUnsignedInt32(0) // crc - would be computed properly in real code
    .appendInt16(0) // attributes
    .appendInt32(0) // lastOffsetDelta
    .appendInt64(timestamp) // firstTimestamp
    .appendInt64(timestamp) // maxTimestamp
    .appendInt64(-1n) // producerId - not specified
    .appendInt16(0) // producerEpoch
    .appendInt32(0) // firstSequence
    .appendInt32(1) // number of records
    // Single record
    .appendVarInt(8) // length of the record
    .appendInt8(0) // attributes
    .appendVarInt64(0n) // timestampDelta
    .appendVarInt(0) // offsetDelta
    .appendVarIntBytes(null) // key
    .appendVarIntBytes(Buffer.from('test-value')) // value
    .appendVarIntArray([], () => {}) // No headers
    // Truncated batch
    .appendInt64(0n) // firstOffset
    .appendInt32(60) // length

  // Now create the full response
  const writer = Writer.create()
    .appendInt32(0) // throttleTimeMs
    .appendInt16(0) // errorCode (success)
    .appendInt32(123) // sessionId
    // Responses array - using tagged fields format
    .appendArray(
      [
        {
          topicId: '12345678-1234-1234-1234-123456789abc',
          partitions: [
            {
              partitionIndex: 0,
              errorCode: 0,
              highWatermark: 100n,
              lastStableOffset: 100n,
              logStartOffset: 0n,
              abortedTransactions: [],
              preferredReadReplica: -1,
              recordsBatch
            }
          ]
        }
      ],
      (w, topic) => {
        w.appendUUID(topic.topicId)
          // Partitions array
          .appendArray(topic.partitions, (w, partition) => {
            w.appendInt32(partition.partitionIndex)
              .appendInt16(partition.errorCode)
              .appendInt64(partition.highWatermark)
              .appendInt64(partition.lastStableOffset)
              .appendInt64(partition.logStartOffset)
              // Aborted transactions array (empty)
              .appendArray(partition.abortedTransactions, () => {})
              .appendInt32(partition.preferredReadReplica)
              // Add records batch
              .appendUnsignedVarInt(partition.recordsBatch.length + 1)
              .appendFrom(partition.recordsBatch)
          })
      }
    )
    .appendInt8(0) // Root tagged fields

  const response = parseResponse(1, 1, 17, Reader.from(writer))

  // Verify the records were parsed correctly
  ok(response.responses[0].partitions[0].records, 'Records should be defined')

  const batch = response.responses[0].partitions[0].records[0]!
  const record = batch.records[0]

  deepStrictEqual(
    {
      firstOffset: batch.firstOffset,
      recordsLength: batch.records.length,
      offsetDelta: record.offsetDelta,
      valueString: record.value.toString()
    },
    {
      firstOffset: 0n,
      recordsLength: 1,
      offsetDelta: 0,
      valueString: 'test-value'
    }
  )

  // Verify value is a Buffer
  ok(Buffer.isBuffer(record.value))
})
Review discussion:
- I'm not sure it works. When the error happens, if the reader position is not moved backwards, the already read records will be lost.
- Nope, that's not how it works. The already read records have been added; only the truncated part is discarded.
- So I think the record that was cut off by the truncation will be lost.
- Which is fine, because on the next iteration Kafka will not send the rest of the message but rather start again from the beginning.
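
To make the behaviour being discussed concrete, here is a minimal sketch of a batch-splitting loop that keeps complete record batches and drops a truncated trailing one. This is not the library's actual parser: splitRecordBatches, the RecordBatch shape, and the fact that it only inspects the 12-byte firstOffset/length prefix are assumptions made purely for illustration.

interface RecordBatch {
  firstOffset: bigint
  byteLength: number
}

// Hypothetical helper: walk a buffer of concatenated record batches and stop
// as soon as the remaining bytes cannot hold the next batch.
function splitRecordBatches (buffer: Buffer): RecordBatch[] {
  const batches: RecordBatch[] = []
  let position = 0

  while (position < buffer.length) {
    // A record batch starts with an int64 firstOffset followed by an int32
    // length that counts the bytes after the length field itself.
    if (position + 12 > buffer.length) {
      break // not even a complete batch header is left
    }

    const firstOffset = buffer.readBigInt64BE(position)
    const batchLength = buffer.readInt32BE(position + 8)
    const end = position + 12 + batchLength

    if (end > buffer.length) {
      // Truncated trailing batch: discard it instead of reading out of bounds.
      // The batches collected so far are still returned, and Kafka resends the
      // truncated batch from its beginning on the next fetch, so no record is lost.
      break
    }

    batches.push({ firstOffset, byteLength: 12 + batchLength })
    position = end
  }

  return batches
}

Under those assumptions, the test above exercises exactly this path: the second batch announces a 60-byte body but no further bytes follow, so it is discarded while the record from the first batch is still returned.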