diff --git a/okapi-benchmarks/build.gradle.kts b/okapi-benchmarks/build.gradle.kts index d1c27b0..51eac44 100644 --- a/okapi-benchmarks/build.gradle.kts +++ b/okapi-benchmarks/build.gradle.kts @@ -41,7 +41,19 @@ jmh { timeOnIteration = "30s" resultFormat = "JSON" resultsFile = layout.buildDirectory.file("reports/jmh/results.json") - jvmArgs = listOf("-Xms2g", "-Xmx2g", "-XX:+UseG1GC") + jvmArgs = listOf( + // Throughput-mode microbenchmarks call deliver() in a tight loop and re-deserialize + // KafkaDeliveryInfo via Jackson + Kotlin reflection per invocation; with -Xmx2g this + // OOMs within the first measurement iteration. 8g leaves room for GC under sustained + // allocation pressure without skewing the benchmark with promotion stalls. + "-Xms8g", + "-Xmx8g", + "-XX:+UseG1GC", + // okapi-postgres.jar and the fat JMH jar both end up on the classpath; both carry + // the Liquibase changelog. Liquibase 4.x treats this as an error by default. The + // files are identical (same jar source on the classpath twice), so WARN is safe. + "-Dliquibase.duplicateFileMode=WARN", + ) } // ktlint should not lint JMH-generated sources. 
diff --git a/okapi-benchmarks/src/jmh/kotlin/com/softwaremill/okapi/benchmarks/DelivererMicroBenchmark.kt b/okapi-benchmarks/src/jmh/kotlin/com/softwaremill/okapi/benchmarks/DelivererMicroBenchmark.kt index 81c258b..8b74742 100644 --- a/okapi-benchmarks/src/jmh/kotlin/com/softwaremill/okapi/benchmarks/DelivererMicroBenchmark.kt +++ b/okapi-benchmarks/src/jmh/kotlin/com/softwaremill/okapi/benchmarks/DelivererMicroBenchmark.kt @@ -15,6 +15,8 @@ import com.softwaremill.okapi.http.ServiceUrlResolver import com.softwaremill.okapi.kafka.KafkaDeliveryInfo import com.softwaremill.okapi.kafka.KafkaMessageDeliverer import org.apache.kafka.clients.producer.MockProducer +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.clients.producer.RecordMetadata import org.apache.kafka.common.serialization.StringSerializer import org.openjdk.jmh.annotations.Benchmark import org.openjdk.jmh.annotations.BenchmarkMode @@ -25,6 +27,7 @@ import org.openjdk.jmh.annotations.Setup import org.openjdk.jmh.annotations.State import org.openjdk.jmh.annotations.TearDown import java.time.Instant +import java.util.concurrent.Future import java.util.concurrent.TimeUnit /** @@ -52,7 +55,18 @@ open class DelivererMicroBenchmark { @Setup(org.openjdk.jmh.annotations.Level.Trial) fun setupTrial() { - val mockProducer = MockProducer(true, null, StringSerializer(), StringSerializer()) + // MockProducer.send() appends every record to an internal `sent` list (exposed as + // history()) and never drops it. In throughput-mode at ~1M ops/s for 30s × multiple + // iterations × forks that list grows to GBs and OOMs the JVM regardless of -Xmx. + // Override send() to discard history after each call — for microbench we don't need + // to inspect what was sent, only to measure deliver() overhead. 
+        val mockProducer = object : MockProducer<String, String>(true, null, StringSerializer(), StringSerializer()) {
+            override fun send(record: ProducerRecord<String, String>, callback: org.apache.kafka.clients.producer.Callback?): Future<RecordMetadata> {
+                val future = super.send(record, callback)
+                clear()
+                return future
+            }
+        }
         kafkaDeliverer = KafkaMessageDeliverer(mockProducer)
 
         wiremock = WireMockServer(wireMockConfig().dynamicPort()).also { it.start() }