KAFKA-15498: bump snappy-java version to 1.1.10.5 (apache#14434) (#962)
Bump the snappy-java version to 1.1.10.5, and add more tests to verify that the compressed data can be correctly decompressed and read.
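
[Not part of this commit — an illustrative sketch of the round trip the new tests exercise: produce records with snappy compression enabled and check what the broker hands back. The broker address and topic name are placeholders.]

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer

object SnappyProduceSketch {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") // placeholder broker address
    props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy")          // compress produced batches with snappy

    val producer = new KafkaProducer[Array[Byte], Array[Byte]](props, new ByteArraySerializer, new ByteArraySerializer)
    try {
      // blocking send: the batch is snappy-compressed on the client and must be readable on the broker side
      val record = new ProducerRecord[Array[Byte], Array[Byte]]("test-topic", "some value".getBytes)
      val metadata = producer.send(record).get()
      println(s"appended at offset ${metadata.offset} in ${metadata.topic}-${metadata.partition}")
    } finally {
      producer.close()
    }
  }
}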

For LogCleanerParameterizedIntegrationTest, we increased the maximum message size used for the snappy case, since with the new snappy version the decompressed size is larger than with the previous version. But since the compression algorithm is not within Kafka's scope, all we need to do is make sure the compressed data can still be successfully decompressed and parsed/read.
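
[Also not from this commit — a minimal snappy-java sketch of the property the tests actually depend on: compressed and decompressed sizes may shift between snappy-java versions, but decompression must reproduce the original bytes exactly. The payload below is made up.]

import java.nio.charset.StandardCharsets
import java.util.Arrays
import org.xerial.snappy.Snappy

object SnappyRoundTrip {
  def main(args: Array[String]): Unit = {
    // arbitrary payload; only the round trip matters, not the exact sizes
    val original = ("sample log record payload " * 100).getBytes(StandardCharsets.UTF_8)

    // the compressed size can differ slightly from one snappy-java version to another
    val compressed = Snappy.compress(original)

    // decompression must always give back the original bytes
    val restored = Snappy.uncompress(compressed)
    assert(Arrays.equals(original, restored), "decompressed bytes must match the original payload")

    println(s"original=${original.length} bytes, compressed=${compressed.length} bytes")
  }
}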

Reviewers: Divij Vaidya <diviv@amazon.com>, Ismael Juma <ismael@juma.me.uk>, Josep Prat <josep.prat@aiven.io>, Kamal Chandraprakash <kamal.chandraprakash@gmail.com>

Co-authored-by: Luke Chen <showuon@gmail.com>
ShivsundarR and showuon committed Oct 12, 2023
1 parent d8faa80 commit 011f512
Showing 4 changed files with 56 additions and 10 deletions.
2 changes: 1 addition & 1 deletion LICENSE-binary
@@ -252,7 +252,7 @@ scala-library-2.13.10
 scala-logging_2.13-3.9.2
 scala-reflect-2.13.5
 scala-java8-compat_2.13-0.9.1
-snappy-java-1.1.8.1
+snappy-java-1.1.10.5
 zookeeper-3.5.9
 zookeeper-jute-3.5.9

58 changes: 52 additions & 6 deletions ProducerCompressionTest.scala
@@ -21,15 +21,21 @@ import kafka.server.{KafkaConfig, KafkaServer}
 import kafka.utils.TestUtils
 import kafka.zk.ZooKeeperTestHarness
 import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
+import org.apache.kafka.clients.producer.RecordMetadata
 import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.common.header.Header
+import org.apache.kafka.common.header.internals.{RecordHeader, RecordHeaders}
 import org.apache.kafka.common.serialization.ByteArraySerializer
 import org.junit.jupiter.api.Assertions._
 import org.junit.jupiter.api.{AfterEach, BeforeEach}
 import org.junit.jupiter.params.ParameterizedTest
 import org.junit.jupiter.params.provider.{Arguments, MethodSource}

+import java.util.concurrent.Future
 import java.util.{Collections, Properties}
 import scala.jdk.CollectionConverters._
+import scala.collection.mutable.ListBuffer
+import scala.util.Random

 class ProducerCompressionTest extends ZooKeeperTestHarness {

@@ -75,13 +81,28 @@ class ProducerCompressionTest extends ZooKeeperTestHarness {
       TestUtils.createTopic(zkClient, topic, 1, 1, List(server))
       val partition = 0

+      def messageValue(length: Int): String = {
+        val random = new Random(0)
+        new String(random.alphanumeric.take(length).toArray)
+      }
+
       // prepare the messages
-      val messageValues = (0 until numRecords).map(i => "value" + i)
+      val messageValues = (0 until numRecords).map(i => messageValue(i))
+      val headerArr = Array[Header](new RecordHeader("key", "value".getBytes))
+      val headers = new RecordHeaders(headerArr)

       // make sure the returned messages are correct
       val now = System.currentTimeMillis()
-      val responses = for (message <- messageValues)
-        yield producer.send(new ProducerRecord(topic, null, now, null, message.getBytes))
+      val responses: ListBuffer[Future[RecordMetadata]] = new ListBuffer[Future[RecordMetadata]]()
+
+      for (message <- messageValues) {
+        // 1. send message without key and header
+        responses += producer.send(new ProducerRecord(topic, null, now, null, message.getBytes))
+        // 2. send message with key, without header
+        responses += producer.send(new ProducerRecord(topic, null, now, message.length.toString.getBytes, message.getBytes))
+        // 3. send message with key and header
+        responses += producer.send(new ProducerRecord(topic, null, now, message.length.toString.getBytes, message.getBytes, headers))
+      }
       for ((future, offset) <- responses.zipWithIndex) {
         assertEquals(offset.toLong, future.get.offset)
       }
@@ -90,12 +111,37 @@ class ProducerCompressionTest extends ZooKeeperTestHarness {
       // make sure the fetched message count match
       consumer.assign(Collections.singleton(tp))
       consumer.seek(tp, 0)
-      val records = TestUtils.consumeRecords(consumer, numRecords)
+      val records = TestUtils.consumeRecords(consumer, numRecords*3)
+
+      for (i <- 0 until numRecords) {
+        val messageValue = messageValues(i)
+        // 1. verify message without key and header
+        var offset = i * 3
+        var record = records(offset)
+        assertNull(record.key())
+        assertEquals(messageValue, new String(record.value))
+        assertEquals(0, record.headers().toArray.length)
+        assertEquals(now, record.timestamp)
+        assertEquals(offset.toLong, record.offset)
+
+        // 2. verify message with key, without header
+        offset = i * 3 + 1
+        record = records(offset)
+        assertEquals(messageValue.length.toString, new String(record.key()))
+        assertEquals(messageValue, new String(record.value))
+        assertEquals(0, record.headers().toArray.length)
+        assertEquals(now, record.timestamp)
+        assertEquals(offset.toLong, record.offset)

-      for (((messageValue, record), index) <- messageValues.zip(records).zipWithIndex) {
+        // 3. verify message with key and header
+        offset = i * 3 + 2
+        record = records(offset)
+        assertEquals(messageValue.length.toString, new String(record.key()))
         assertEquals(messageValue, new String(record.value))
+        assertEquals(1, record.headers().toArray.length)
+        assertEquals(headerArr.apply(0), record.headers().toArray.apply(0))
         assertEquals(now, record.timestamp)
-        assertEquals(index.toLong, record.offset)
+        assertEquals(offset.toLong, record.offset)
       }
     } finally {
       producer.close()
4 changes: 2 additions & 2 deletions LogCleanerParameterizedIntegrationTest.scala
@@ -138,9 +138,9 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati
       case _ =>
         // the broker assigns absolute offsets for message format 0 which potentially causes the compressed size to
         // increase because the broker offsets are larger than the ones assigned by the client
-        // adding `5` to the message set size is good enough for this test: it covers the increased message size while
+        // adding `6` to the message set size is good enough for this test: it covers the increased message size while
         // still being less than the overhead introduced by the conversion from message format version 0 to 1
-        largeMessageSet.sizeInBytes + 5
+        largeMessageSet.sizeInBytes + 6
     }

     cleaner = makeCleaner(partitions = topicPartitions, maxMessageSize = maxMessageSize)
2 changes: 1 addition & 1 deletion gradle/dependencies.gradle
@@ -115,7 +115,7 @@ versions += [
   scoveragePlugin: "5.0.0",
   shadowPlugin: "6.1.0",
   slf4j: "1.7.36",
-  snappy: "1.1.10.1",
+  snappy: "1.1.10.5",
   spotbugs: "4.1.4",
   spotbugsPlugin: "4.6.0",
   spotlessPlugin: "5.8.2",
